diff --git a/Cargo.lock b/Cargo.lock
index d787d2fe08c25b52ffe47db5ffa7ea8d1b404b8e..d053186970f00c3fc7aa32ac525a0ddb8fa47a30 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -12727,6 +12727,7 @@ dependencies = [
  "polkadot-node-subsystem-util",
  "polkadot-primitives",
  "polkadot-primitives-test-helpers",
+ "rstest",
  "sc-keystore",
  "sc-network",
  "sp-core",
@@ -13123,7 +13124,6 @@ dependencies = [
  "polkadot-node-subsystem-util",
  "polkadot-primitives",
  "polkadot-primitives-test-helpers",
- "rstest",
  "sc-keystore",
  "sp-application-crypto",
  "sp-core",
diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs
index 23acb0450944e8eedea437da545830174ed84449..a45edcbef52a91009e670cc5bb2c6a1a5923fa9e 100644
--- a/polkadot/node/core/backing/src/lib.rs
+++ b/polkadot/node/core/backing/src/lib.rs
@@ -30,7 +30,7 @@
 //! assigned group of validators may be backed on-chain and proceed to the availability
 //! stage.
 //!
-//! Depth is a concept relating to asynchronous backing, by which validators
+//! Depth is a concept relating to asynchronous backing, by which
 //! short sub-chains of candidates are backed and extended off-chain, and then placed
 //! asynchronously into blocks of the relay chain as those are authored and as the
 //! relay-chain state becomes ready for them. Asynchronous backing allows parachains to
@@ -66,7 +66,7 @@
 #![deny(unused_crate_dependencies)]
 
 use std::{
-	collections::{BTreeMap, HashMap, HashSet},
+	collections::{HashMap, HashSet},
 	sync::Arc,
 };
 
@@ -88,7 +88,7 @@ use polkadot_node_subsystem::{
 	messages::{
 		AvailabilityDistributionMessage, AvailabilityStoreMessage, CanSecondRequest,
 		CandidateBackingMessage, CandidateValidationMessage, CollatorProtocolMessage,
-		HypotheticalCandidate, HypotheticalFrontierRequest, IntroduceCandidateRequest,
+		HypotheticalCandidate, HypotheticalMembershipRequest, IntroduceSecondedCandidateRequest,
 		ProspectiveParachainsMessage, ProvisionableData, ProvisionerMessage, RuntimeApiMessage,
 		RuntimeApiRequest, StatementDistributionMessage, StoreAvailableDataError,
 	},
@@ -242,20 +242,44 @@ struct PerRelayParentState {
 struct PerCandidateState {
 	persisted_validation_data: PersistedValidationData,
 	seconded_locally: bool,
-	para_id: ParaId,
 	relay_parent: Hash,
 }
 
-struct ActiveLeafState {
-	prospective_parachains_mode: ProspectiveParachainsMode,
-	/// The candidates seconded at various depths under this active
-	/// leaf with respect to parachain id. A candidate can only be
-	/// seconded when its hypothetical frontier under every active leaf
-	/// has an empty entry in this map.
-	///
-	/// When prospective parachains are disabled, the only depth
-	/// which is allowed is 0.
-	seconded_at_depth: HashMap<ParaId, BTreeMap<usize, CandidateHash>>,
+enum ActiveLeafState {
+	// If prospective parachains are disabled, a validator may only second one candidate per
+	// `ParaId`.
+	ProspectiveParachainsDisabled { seconded: HashSet<ParaId> },
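+	// With prospective parachains enabled, we only need to keep the async backing parameters
+	// for this leaf.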
+	ProspectiveParachainsEnabled { max_candidate_depth: usize, allowed_ancestry_len: usize },
+}
+
+impl ActiveLeafState {
+	fn new(mode: ProspectiveParachainsMode) -> Self {
+		match mode {
+			ProspectiveParachainsMode::Disabled =>
+				Self::ProspectiveParachainsDisabled { seconded: HashSet::new() },
+			ProspectiveParachainsMode::Enabled { max_candidate_depth, allowed_ancestry_len } =>
+				Self::ProspectiveParachainsEnabled { max_candidate_depth, allowed_ancestry_len },
+		}
+	}
+
+	fn add_seconded_candidate(&mut self, para_id: ParaId) {
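+		// Nothing to track when prospective parachains are enabled; in that case, the
+		// prospective parachains subsystem itself keeps track of seconded candidates.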
+		if let Self::ProspectiveParachainsDisabled { seconded } = self {
+			seconded.insert(para_id);
+		}
+	}
+}
+
+impl From<&ActiveLeafState> for ProspectiveParachainsMode {
+	fn from(state: &ActiveLeafState) -> Self {
+		match *state {
+			ActiveLeafState::ProspectiveParachainsDisabled { .. } =>
+				ProspectiveParachainsMode::Disabled,
+			ActiveLeafState::ProspectiveParachainsEnabled {
+				max_candidate_depth,
+				allowed_ancestry_len,
+			} => ProspectiveParachainsMode::Enabled { max_candidate_depth, allowed_ancestry_len },
+		}
+	}
 }
 
 /// The state of the subsystem.
@@ -277,11 +301,11 @@ struct State {
 	///      parachains.
 	///
 	/// Relay-chain blocks which don't support prospective parachains are
-	/// never included in the fragment trees of active leaves which do.
+	/// never included in the fragment chains of active leaves which do.
 	///
 	/// While it would be technically possible to support such leaves in
-	/// fragment trees, it only benefits the transition period when asynchronous
-	/// backing is being enabled and complicates code complexity.
+	/// fragment chains, it would only benefit the transition period when asynchronous
+	/// backing is being enabled, while complicating the code.
 	per_relay_parent: HashMap<Hash, PerRelayParentState>,
 	/// State tracked for all candidates relevant to the implicit view.
 	///
@@ -864,17 +888,9 @@ async fn handle_active_leaves_update<Context>(
 				return Ok(())
 			}
 
-			state.per_leaf.insert(
-				leaf.hash,
-				ActiveLeafState {
-					prospective_parachains_mode: ProspectiveParachainsMode::Disabled,
-					// This is empty because the only allowed relay-parent and depth
-					// when prospective parachains are disabled is the leaf hash and 0,
-					// respectively. We've just learned about the leaf hash, so we cannot
-					// have any candidates seconded with it as a relay-parent yet.
-					seconded_at_depth: HashMap::new(),
-				},
-			);
+			state
+				.per_leaf
+				.insert(leaf.hash, ActiveLeafState::new(ProspectiveParachainsMode::Disabled));
 
 			(vec![leaf.hash], ProspectiveParachainsMode::Disabled)
 		},
@@ -882,63 +898,9 @@ async fn handle_active_leaves_update<Context>(
 			let fresh_relay_parents =
 				state.implicit_view.known_allowed_relay_parents_under(&leaf.hash, None);
 
-			// At this point, all candidates outside of the implicit view
-			// have been cleaned up. For all which remain, which we've seconded,
-			// we ask the prospective parachains subsystem where they land in the fragment
-			// tree for the given active leaf. This comprises our `seconded_at_depth`.
-
-			let remaining_seconded = state
-				.per_candidate
-				.iter()
-				.filter(|(_, cd)| cd.seconded_locally)
-				.map(|(c_hash, cd)| (*c_hash, cd.para_id));
-
-			// one-to-one correspondence to remaining_seconded
-			let mut membership_answers = FuturesOrdered::new();
-
-			for (candidate_hash, para_id) in remaining_seconded {
-				let (tx, rx) = oneshot::channel();
-				membership_answers
-					.push_back(rx.map_ok(move |membership| (para_id, candidate_hash, membership)));
-
-				ctx.send_message(ProspectiveParachainsMessage::GetTreeMembership(
-					para_id,
-					candidate_hash,
-					tx,
-				))
-				.await;
-			}
-
-			let mut seconded_at_depth = HashMap::new();
-			while let Some(response) = membership_answers.next().await {
-				match response {
-					Err(oneshot::Canceled) => {
-						gum::warn!(
-							target: LOG_TARGET,
-							"Prospective parachains subsystem unreachable for membership request",
-						);
-					},
-					Ok((para_id, candidate_hash, membership)) => {
-						// This request gives membership in all fragment trees. We have some
-						// wasted data here, and it can be optimized if it proves
-						// relevant to performance.
-						if let Some((_, depths)) =
-							membership.into_iter().find(|(leaf_hash, _)| leaf_hash == &leaf.hash)
-						{
-							let para_entry: &mut BTreeMap<usize, CandidateHash> =
-								seconded_at_depth.entry(para_id).or_default();
-							for depth in depths {
-								para_entry.insert(depth, candidate_hash);
-							}
-						}
-					},
-				}
-			}
+			let active_leaf_state = ActiveLeafState::new(prospective_parachains_mode);
 
-			state.per_leaf.insert(
-				leaf.hash,
-				ActiveLeafState { prospective_parachains_mode, seconded_at_depth },
-			);
+			state.per_leaf.insert(leaf.hash, active_leaf_state);
 
 			let fresh_relay_parent = match fresh_relay_parents {
 				Some(f) => f.to_vec(),
@@ -981,7 +943,7 @@ async fn handle_active_leaves_update<Context>(
 				// block itself did.
 				leaf_mode
 			},
-			Some(l) => l.prospective_parachains_mode,
+			Some(l) => l.into(),
 		};
 
 		// construct a `PerRelayParent` from the runtime API
@@ -1247,20 +1209,20 @@ async fn construct_per_relay_parent_state<Context>(
 
 enum SecondingAllowed {
 	No,
-	Yes(Vec<(Hash, Vec<usize>)>),
+	// The active leaves on which seconding is allowed.
+	Yes(Vec<Hash>),
 }
 
-/// Checks whether a candidate can be seconded based on its hypothetical frontiers in the fragment
-/// tree and what we've already seconded in all active leaves.
+/// Checks whether a candidate can be seconded based on its hypothetical membership in the fragment
+/// chains of the active leaves.
 #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
 async fn seconding_sanity_check<Context>(
 	ctx: &mut Context,
 	active_leaves: &HashMap<Hash, ActiveLeafState>,
 	implicit_view: &ImplicitView,
 	hypothetical_candidate: HypotheticalCandidate,
-	backed_in_path_only: bool,
 ) -> SecondingAllowed {
-	let mut membership = Vec::new();
+	let mut leaves_for_seconding = Vec::new();
 	let mut responses = FuturesOrdered::<BoxFuture<'_, Result<_, oneshot::Canceled>>>::new();
 
 	let candidate_para = hypothetical_candidate.candidate_para();
@@ -1268,7 +1230,7 @@ async fn seconding_sanity_check<Context>(
 	let candidate_hash = hypothetical_candidate.candidate_hash();
 
 	for (head, leaf_state) in active_leaves {
-		if leaf_state.prospective_parachains_mode.is_enabled() {
+		if ProspectiveParachainsMode::from(leaf_state).is_enabled() {
 			// Check that the candidate relay parent is allowed for para, skip the
 			// leaf otherwise.
 			let allowed_parents_for_para =
@@ -1278,40 +1240,36 @@ async fn seconding_sanity_check<Context>(
 			}
 
 			let (tx, rx) = oneshot::channel();
-			ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalFrontier(
-				HypotheticalFrontierRequest {
+			ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalMembership(
+				HypotheticalMembershipRequest {
 					candidates: vec![hypothetical_candidate.clone()],
-					fragment_tree_relay_parent: Some(*head),
-					backed_in_path_only,
+					fragment_chain_relay_parent: Some(*head),
 				},
 				tx,
 			))
 			.await;
-			let response = rx.map_ok(move |frontiers| {
-				let depths: Vec<usize> = frontiers
+			let response = rx.map_ok(move |candidate_memberships| {
+				let is_member_or_potential = candidate_memberships
 					.into_iter()
-					.flat_map(|(candidate, memberships)| {
-						debug_assert_eq!(candidate.candidate_hash(), candidate_hash);
-						memberships.into_iter().flat_map(|(relay_parent, depths)| {
-							debug_assert_eq!(relay_parent, *head);
-							depths
-						})
+					.find_map(|(candidate, leaves)| {
+						(candidate.candidate_hash() == candidate_hash).then_some(leaves)
 					})
-					.collect();
-				(depths, head, leaf_state)
+					.and_then(|leaves| leaves.into_iter().find(|leaf| leaf == head))
+					.is_some();
+
+				(is_member_or_potential, head)
 			});
 			responses.push_back(response.boxed());
 		} else {
 			if *head == candidate_relay_parent {
-				if leaf_state
-					.seconded_at_depth
-					.get(&candidate_para)
-					.map_or(false, |occupied| occupied.contains_key(&0))
-				{
-					// The leaf is already occupied.
-					return SecondingAllowed::No
+				if let ActiveLeafState::ProspectiveParachainsDisabled { seconded } = leaf_state {
+					if seconded.contains(&candidate_para) {
+						// The leaf is already occupied. Without prospective parachains, we only
+						// second one candidate per para.
+						return SecondingAllowed::No
+					}
 				}
-				responses.push_back(futures::future::ok((vec![0], head, leaf_state)).boxed());
+				responses.push_back(futures::future::ok((true, head)).boxed());
 			}
 		}
 	}
@@ -1325,38 +1283,32 @@ async fn seconding_sanity_check<Context>(
 			Err(oneshot::Canceled) => {
 				gum::warn!(
 					target: LOG_TARGET,
-					"Failed to reach prospective parachains subsystem for hypothetical frontiers",
+					"Failed to reach prospective parachains subsystem for hypothetical membership",
 				);
 
 				return SecondingAllowed::No
 			},
-			Ok((depths, head, leaf_state)) => {
-				for depth in &depths {
-					if leaf_state
-						.seconded_at_depth
-						.get(&candidate_para)
-						.map_or(false, |occupied| occupied.contains_key(&depth))
-					{
-						gum::debug!(
-							target: LOG_TARGET,
-							?candidate_hash,
-							depth,
-							leaf_hash = ?head,
-							"Refusing to second candidate at depth - already occupied."
-						);
-
-						return SecondingAllowed::No
-					}
-				}
-
-				membership.push((*head, depths));
+			Ok((is_member_or_potential, head)) => match is_member_or_potential {
+				false => {
+					gum::debug!(
+						target: LOG_TARGET,
+						?candidate_hash,
+						leaf_hash = ?head,
+						"Refusing to second candidate at leaf: not a potential member.",
+					);
+				},
+				true => {
+					leaves_for_seconding.push(*head);
+				},
 			},
 		}
 	}
 
-	// At this point we've checked the depths of the candidate against all active
-	// leaves.
-	SecondingAllowed::Yes(membership)
+	if leaves_for_seconding.is_empty() {
+		SecondingAllowed::No
+	} else {
+		SecondingAllowed::Yes(leaves_for_seconding)
+	}
 }
 
 /// Performs seconding sanity check for an advertisement.
@@ -1385,16 +1337,12 @@ async fn handle_can_second_request<Context>(
 			&state.per_leaf,
 			&state.implicit_view,
 			hypothetical_candidate,
-			true,
 		)
 		.await;
 
 		match result {
 			SecondingAllowed::No => false,
-			SecondingAllowed::Yes(membership) => {
-				// Candidate should be recognized by at least some fragment tree.
-				membership.iter().any(|(_, m)| !m.is_empty())
-			},
+			SecondingAllowed::Yes(leaves) => !leaves.is_empty(),
 		}
 	} else {
 		// Relay parent is unknown or async backing is disabled.
@@ -1435,20 +1383,6 @@ async fn handle_validated_candidate_command<Context>(
 							commitments,
 						};
 
-						let parent_head_data_hash = persisted_validation_data.parent_head.hash();
-						// Note that `GetHypotheticalFrontier` doesn't account for recursion,
-						// i.e. candidates can appear at multiple depths in the tree and in fact
-						// at all depths, and we don't know what depths a candidate will ultimately
-						// occupy because that's dependent on other candidates we haven't yet
-						// received.
-						//
-						// The only way to effectively rule this out is to have candidate receipts
-						// directly commit to the parachain block number or some other incrementing
-						// counter. That requires a major primitives format upgrade, so for now
-						// we just rule out trivial cycles.
-						if parent_head_data_hash == receipt.commitments.head_data.hash() {
-							return Ok(())
-						}
 						let hypothetical_candidate = HypotheticalCandidate::Complete {
 							candidate_hash,
 							receipt: Arc::new(receipt.clone()),
@@ -1457,12 +1391,11 @@ async fn handle_validated_candidate_command<Context>(
 						// sanity check that we're allowed to second the candidate
 						// and that it doesn't conflict with other candidates we've
 						// seconded.
-						let fragment_tree_membership = match seconding_sanity_check(
+						let hypothetical_membership = match seconding_sanity_check(
 							ctx,
 							&state.per_leaf,
 							&state.implicit_view,
 							hypothetical_candidate,
-							false,
 						)
 						.await
 						{
@@ -1517,8 +1450,8 @@ async fn handle_validated_candidate_command<Context>(
 								Some(p) => p.seconded_locally = true,
 							}
 
-							// update seconded depths in active leaves.
-							for (leaf, depths) in fragment_tree_membership {
+							// Record the seconded candidate; only tracked when prospective
+							// parachains are disabled.
+							for leaf in hypothetical_membership {
 								let leaf_data = match state.per_leaf.get_mut(&leaf) {
 									None => {
 										gum::warn!(
@@ -1532,14 +1465,7 @@ async fn handle_validated_candidate_command<Context>(
 									Some(d) => d,
 								};
 
-								let seconded_at_depth = leaf_data
-									.seconded_at_depth
-									.entry(candidate.descriptor().para_id)
-									.or_default();
-
-								for depth in depths {
-									seconded_at_depth.insert(depth, candidate_hash);
-								}
+								leaf_data.add_seconded_candidate(candidate.descriptor().para_id);
 							}
 
 							rp_state.issued_statements.insert(candidate_hash);
@@ -1650,7 +1576,7 @@ fn sign_statement(
 /// and any of the following are true:
 /// 1. There is no `PersistedValidationData` attached.
 /// 2. Prospective parachains are enabled for the relay parent and the prospective parachains
-///    subsystem returned an empty `FragmentTreeMembership` i.e. did not recognize the candidate as
+///    subsystem returned an empty `HypotheticalMembership` i.e. did not recognize the candidate as
 ///    being applicable to any of the active leaves.
 #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
 async fn import_statement<Context>(
@@ -1686,8 +1612,8 @@ async fn import_statement<Context>(
 		if !per_candidate.contains_key(&candidate_hash) {
 			if rp_state.prospective_parachains_mode.is_enabled() {
 				let (tx, rx) = oneshot::channel();
-				ctx.send_message(ProspectiveParachainsMessage::IntroduceCandidate(
-					IntroduceCandidateRequest {
+				ctx.send_message(ProspectiveParachainsMessage::IntroduceSecondedCandidate(
+					IntroduceSecondedCandidateRequest {
 						candidate_para: candidate.descriptor().para_id,
 						candidate_receipt: candidate.clone(),
 						persisted_validation_data: pvd.clone(),
@@ -1705,17 +1631,9 @@ async fn import_statement<Context>(
 
 						return Err(Error::RejectedByProspectiveParachains)
 					},
-					Ok(membership) =>
-						if membership.is_empty() {
-							return Err(Error::RejectedByProspectiveParachains)
-						},
+					Ok(false) => return Err(Error::RejectedByProspectiveParachains),
+					Ok(true) => {},
 				}
-
-				ctx.send_message(ProspectiveParachainsMessage::CandidateSeconded(
-					candidate.descriptor().para_id,
-					candidate_hash,
-				))
-				.await;
 			}
 
 			// Only save the candidate if it was approved by prospective parachains.
@@ -1725,7 +1643,6 @@ async fn import_statement<Context>(
 					persisted_validation_data: pvd.clone(),
 					// This is set after importing when seconding locally.
 					seconded_locally: false,
-					para_id: candidate.descriptor().para_id,
 					relay_parent: candidate.descriptor().relay_parent,
 				},
 			);
@@ -1786,13 +1703,6 @@ async fn post_import_statement_actions<Context>(
 						candidate_hash,
 					))
 					.await;
-					// Backed candidate potentially unblocks new advertisements,
-					// notify collator protocol.
-					ctx.send_message(CollatorProtocolMessage::Backed {
-						para_id,
-						para_head: backed.candidate().descriptor.para_head,
-					})
-					.await;
 					// Notify statement distribution of backed candidate.
 					ctx.send_message(StatementDistributionMessage::Backed(candidate_hash)).await;
 				} else {
@@ -2016,7 +1926,7 @@ async fn maybe_validate_and_import<Context>(
 	if let Some(summary) = summary {
 		// import_statement already takes care of communicating with the
 		// prospective parachains subsystem. At this point, the candidate
-		// has already been accepted into the fragment trees.
+		// has already been accepted by the subsystem.
 
 		let candidate_hash = summary.candidate;
 
diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs
index 94310d2aa164650db84b78ddf361a9f465ac207d..8a72902f0815030bf2d5d13d3f04fdb1763254aa 100644
--- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs
+++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs
@@ -17,7 +17,7 @@
 //! Tests for the backing subsystem with enabled prospective parachains.
 
 use polkadot_node_subsystem::{
-	messages::{ChainApiMessage, FragmentTreeMembership},
+	messages::{ChainApiMessage, HypotheticalMembership},
 	ActivatedLeaf, TimeoutExt,
 };
 use polkadot_primitives::{AsyncBackingParams, BlockNumber, Header, OccupiedCore};
@@ -40,7 +40,6 @@ async fn activate_leaf(
 	virtual_overseer: &mut VirtualOverseer,
 	leaf: TestLeaf,
 	test_state: &TestState,
-	seconded_in_view: usize,
 ) {
 	let TestLeaf { activated, min_relay_parents } = leaf;
 	let leaf_hash = activated.hash;
@@ -122,21 +121,6 @@ async fn activate_leaf(
 		}
 	}
 
-	for _ in 0..seconded_in_view {
-		let msg = match next_overseer_message.take() {
-			Some(msg) => msg,
-			None => virtual_overseer.recv().await,
-		};
-		assert_matches!(
-			msg,
-			AllMessages::ProspectiveParachains(
-				ProspectiveParachainsMessage::GetTreeMembership(.., tx),
-			) => {
-				tx.send(Vec::new()).unwrap();
-			}
-		);
-	}
-
 	for (hash, number) in ancestry_iter.take(requested_len) {
 		let msg = match next_overseer_message.take() {
 			Some(msg) => msg,
@@ -297,11 +281,11 @@ async fn assert_validate_seconded_candidate(
 	);
 }
 
-async fn assert_hypothetical_frontier_requests(
+async fn assert_hypothetical_membership_requests(
 	virtual_overseer: &mut VirtualOverseer,
 	mut expected_requests: Vec<(
-		HypotheticalFrontierRequest,
-		Vec<(HypotheticalCandidate, FragmentTreeMembership)>,
+		HypotheticalMembershipRequest,
+		Vec<(HypotheticalCandidate, HypotheticalMembership)>,
 	)>,
 ) {
 	// Requests come with no particular order.
@@ -311,13 +295,13 @@ async fn assert_hypothetical_frontier_requests(
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::ProspectiveParachains(
-				ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx),
+				ProspectiveParachainsMessage::GetHypotheticalMembership(request, tx),
 			) => {
 				let idx = match expected_requests.iter().position(|r| r.0 == request) {
 					Some(idx) => idx,
 					None =>
 						panic!(
-						"unexpected hypothetical frontier request, no match found for {:?}",
+						"unexpected hypothetical membership request, no match found for {:?}",
 						request
 						),
 				};
@@ -330,18 +314,17 @@ async fn assert_hypothetical_frontier_requests(
 	}
 }
 
-fn make_hypothetical_frontier_response(
-	depths: Vec<usize>,
+fn make_hypothetical_membership_response(
 	hypothetical_candidate: HypotheticalCandidate,
 	relay_parent_hash: Hash,
-) -> Vec<(HypotheticalCandidate, FragmentTreeMembership)> {
-	vec![(hypothetical_candidate, vec![(relay_parent_hash, depths)])]
+) -> Vec<(HypotheticalCandidate, HypotheticalMembership)> {
+	vec![(hypothetical_candidate, vec![relay_parent_hash])]
 }
 
 // Test that `seconding_sanity_check` works when a candidate is allowed
 // for all leaves.
 #[test]
-fn seconding_sanity_check_allowed() {
+fn seconding_sanity_check_allowed_on_all() {
 	let test_state = TestState::default();
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
 		// Candidate is seconded in a parent of the activated `leaf_a`.
@@ -364,8 +347,8 @@ fn seconding_sanity_check_allowed() {
 		let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)];
 		let test_leaf_b = TestLeaf { activated, min_relay_parents };
 
-		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await;
-		activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state, 0).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
@@ -412,24 +395,19 @@ fn seconding_sanity_check_allowed() {
 			receipt: Arc::new(candidate.clone()),
 			persisted_validation_data: pvd.clone(),
 		};
-		let expected_request_a = HypotheticalFrontierRequest {
+		let expected_request_a = HypotheticalMembershipRequest {
 			candidates: vec![hypothetical_candidate.clone()],
-			fragment_tree_relay_parent: Some(leaf_a_hash),
-			backed_in_path_only: false,
+			fragment_chain_relay_parent: Some(leaf_a_hash),
 		};
-		let expected_response_a = make_hypothetical_frontier_response(
-			vec![0, 1, 2, 3],
-			hypothetical_candidate.clone(),
-			leaf_a_hash,
-		);
-		let expected_request_b = HypotheticalFrontierRequest {
+		let expected_response_a =
+			make_hypothetical_membership_response(hypothetical_candidate.clone(), leaf_a_hash);
+		let expected_request_b = HypotheticalMembershipRequest {
 			candidates: vec![hypothetical_candidate.clone()],
-			fragment_tree_relay_parent: Some(leaf_b_hash),
-			backed_in_path_only: false,
+			fragment_chain_relay_parent: Some(leaf_b_hash),
 		};
 		let expected_response_b =
-			make_hypothetical_frontier_response(vec![3], hypothetical_candidate, leaf_b_hash);
-		assert_hypothetical_frontier_requests(
+			make_hypothetical_membership_response(hypothetical_candidate, leaf_b_hash);
+		assert_hypothetical_membership_requests(
 			&mut virtual_overseer,
 			vec![
 				(expected_request_a, expected_response_a),
@@ -441,7 +419,7 @@ fn seconding_sanity_check_allowed() {
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::ProspectiveParachains(
-				ProspectiveParachainsMessage::IntroduceCandidate(
+				ProspectiveParachainsMessage::IntroduceSecondedCandidate(
 					req,
 					tx,
 				),
@@ -449,19 +427,10 @@ fn seconding_sanity_check_allowed() {
 				req.candidate_receipt == candidate
 				&& req.candidate_para == para_id
 				&& pvd == req.persisted_validation_data => {
-				// Any non-empty response will do.
-				tx.send(vec![(leaf_a_hash, vec![0, 1, 2, 3])]).unwrap();
+				tx.send(true).unwrap();
 			}
 		);
 
-		assert_matches!(
-			virtual_overseer.recv().await,
-			AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded(
-				_,
-				_
-			))
-		);
-
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::StatementDistribution(
@@ -484,8 +453,8 @@ fn seconding_sanity_check_allowed() {
 	});
 }
 
-// Test that `seconding_sanity_check` works when a candidate is disallowed
-// for at least one leaf.
+// Test that `seconding_sanity_check` rejects a candidate that is not allowed on any of the
+// active leaves.
 #[test]
 fn seconding_sanity_check_disallowed() {
 	let test_state = TestState::default();
@@ -510,7 +479,7 @@ fn seconding_sanity_check_disallowed() {
 		let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)];
 		let test_leaf_b = TestLeaf { activated, min_relay_parents };
 
-		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
@@ -557,17 +526,13 @@ fn seconding_sanity_check_disallowed() {
 			receipt: Arc::new(candidate.clone()),
 			persisted_validation_data: pvd.clone(),
 		};
-		let expected_request_a = HypotheticalFrontierRequest {
+		let expected_request_a = HypotheticalMembershipRequest {
 			candidates: vec![hypothetical_candidate.clone()],
-			fragment_tree_relay_parent: Some(leaf_a_hash),
-			backed_in_path_only: false,
+			fragment_chain_relay_parent: Some(leaf_a_hash),
 		};
-		let expected_response_a = make_hypothetical_frontier_response(
-			vec![0, 1, 2, 3],
-			hypothetical_candidate,
-			leaf_a_hash,
-		);
-		assert_hypothetical_frontier_requests(
+		let expected_response_a =
+			make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash);
+		assert_hypothetical_membership_requests(
 			&mut virtual_overseer,
 			vec![(expected_request_a, expected_response_a)],
 		)
@@ -576,7 +541,7 @@ fn seconding_sanity_check_disallowed() {
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::ProspectiveParachains(
-				ProspectiveParachainsMessage::IntroduceCandidate(
+				ProspectiveParachainsMessage::IntroduceSecondedCandidate(
 					req,
 					tx,
 				),
@@ -584,19 +549,10 @@ fn seconding_sanity_check_disallowed() {
 				req.candidate_receipt == candidate
 				&& req.candidate_para == para_id
 				&& pvd == req.persisted_validation_data => {
-				// Any non-empty response will do.
-				tx.send(vec![(leaf_a_hash, vec![0, 2, 3])]).unwrap();
+				tx.send(true).unwrap();
 			}
 		);
 
-		assert_matches!(
-			virtual_overseer.recv().await,
-			AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded(
-				_,
-				_
-			))
-		);
-
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::StatementDistribution(
@@ -615,10 +571,7 @@ fn seconding_sanity_check_disallowed() {
 			}
 		);
 
-		// A seconded candidate occupies a depth, try to second another one.
-		// It is allowed in a new leaf but not allowed in the old one.
-		// Expect it to be rejected.
-		activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state, 1).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await;
 		let leaf_a_grandparent = get_parent_hash(leaf_a_parent);
 		let candidate = TestCandidateBuilder {
 			para_id,
@@ -659,28 +612,20 @@ fn seconding_sanity_check_disallowed() {
 			receipt: Arc::new(candidate),
 			persisted_validation_data: pvd,
 		};
-		let expected_request_a = HypotheticalFrontierRequest {
+		let expected_request_a = HypotheticalMembershipRequest {
 			candidates: vec![hypothetical_candidate.clone()],
-			fragment_tree_relay_parent: Some(leaf_a_hash),
-			backed_in_path_only: false,
+			fragment_chain_relay_parent: Some(leaf_a_hash),
 		};
-		let expected_response_a = make_hypothetical_frontier_response(
-			vec![3],
-			hypothetical_candidate.clone(),
-			leaf_a_hash,
-		);
-		let expected_request_b = HypotheticalFrontierRequest {
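+		// Both leaves return an empty membership, so seconding should be refused.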
+		let expected_empty_response = vec![(hypothetical_candidate.clone(), vec![])];
+		let expected_request_b = HypotheticalMembershipRequest {
 			candidates: vec![hypothetical_candidate.clone()],
-			fragment_tree_relay_parent: Some(leaf_b_hash),
-			backed_in_path_only: false,
+			fragment_chain_relay_parent: Some(leaf_b_hash),
 		};
-		let expected_response_b =
-			make_hypothetical_frontier_response(vec![1], hypothetical_candidate, leaf_b_hash);
-		assert_hypothetical_frontier_requests(
+		assert_hypothetical_membership_requests(
 			&mut virtual_overseer,
 			vec![
-				(expected_request_a, expected_response_a), // All depths are occupied.
-				(expected_request_b, expected_response_b),
+				(expected_request_a, expected_empty_response.clone()),
+				(expected_request_b, expected_empty_response),
 			],
 		)
 		.await;
@@ -695,6 +640,137 @@ fn seconding_sanity_check_disallowed() {
 	});
 }
 
+// Test that `seconding_sanity_check` allows seconding a candidate when it's allowed on at least one
+// leaf.
+#[test]
+fn seconding_sanity_check_allowed_on_at_least_one_leaf() {
+	let test_state = TestState::default();
+	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
+		// Candidate is seconded in a parent of the activated `leaf_a`.
+		const LEAF_A_BLOCK_NUMBER: BlockNumber = 100;
+		const LEAF_A_ANCESTRY_LEN: BlockNumber = 3;
+		let para_id = test_state.chain_ids[0];
+
+		// `a` is grandparent of `b`.
+		let leaf_a_hash = Hash::from_low_u64_be(130);
+		let leaf_a_parent = get_parent_hash(leaf_a_hash);
+		let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER);
+		let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)];
+		let test_leaf_a = TestLeaf { activated, min_relay_parents };
+
+		const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2;
+		const LEAF_B_ANCESTRY_LEN: BlockNumber = 4;
+
+		let leaf_b_hash = Hash::from_low_u64_be(128);
+		let activated = new_leaf(leaf_b_hash, LEAF_B_BLOCK_NUMBER);
+		let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)];
+		let test_leaf_b = TestLeaf { activated, min_relay_parents };
+
+		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await;
+
+		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
+		let pvd = dummy_pvd();
+		let validation_code = ValidationCode(vec![1, 2, 3]);
+
+		let expected_head_data = test_state.head_data.get(&para_id).unwrap();
+
+		let pov_hash = pov.hash();
+		let candidate = TestCandidateBuilder {
+			para_id,
+			relay_parent: leaf_a_parent,
+			pov_hash,
+			head_data: expected_head_data.clone(),
+			erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()),
+			persisted_validation_data_hash: pvd.hash(),
+			validation_code: validation_code.0.clone(),
+		}
+		.build();
+
+		let second = CandidateBackingMessage::Second(
+			leaf_a_hash,
+			candidate.to_plain(),
+			pvd.clone(),
+			pov.clone(),
+		);
+
+		virtual_overseer.send(FromOrchestra::Communication { msg: second }).await;
+
+		assert_validate_seconded_candidate(
+			&mut virtual_overseer,
+			leaf_a_parent,
+			&candidate,
+			&pov,
+			&pvd,
+			&validation_code,
+			expected_head_data,
+			false,
+		)
+		.await;
+
+		// `seconding_sanity_check`
+		let hypothetical_candidate = HypotheticalCandidate::Complete {
+			candidate_hash: candidate.hash(),
+			receipt: Arc::new(candidate.clone()),
+			persisted_validation_data: pvd.clone(),
+		};
+		let expected_request_a = HypotheticalMembershipRequest {
+			candidates: vec![hypothetical_candidate.clone()],
+			fragment_chain_relay_parent: Some(leaf_a_hash),
+		};
+		let expected_response_a =
+			make_hypothetical_membership_response(hypothetical_candidate.clone(), leaf_a_hash);
+		let expected_request_b = HypotheticalMembershipRequest {
+			candidates: vec![hypothetical_candidate.clone()],
+			fragment_chain_relay_parent: Some(leaf_b_hash),
+		};
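+		// No membership on leaf B, but seconding is allowed since leaf A accepts it.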
+		let expected_response_b = vec![(hypothetical_candidate.clone(), vec![])];
+		assert_hypothetical_membership_requests(
+			&mut virtual_overseer,
+			vec![
+				(expected_request_a, expected_response_a),
+				(expected_request_b, expected_response_b),
+			],
+		)
+		.await;
+		// Prospective parachains are notified.
+		assert_matches!(
+			virtual_overseer.recv().await,
+			AllMessages::ProspectiveParachains(
+				ProspectiveParachainsMessage::IntroduceSecondedCandidate(
+					req,
+					tx,
+				),
+			) if
+				req.candidate_receipt == candidate
+				&& req.candidate_para == para_id
+				&& pvd == req.persisted_validation_data => {
+				tx.send(true).unwrap();
+			}
+		);
+
+		assert_matches!(
+			virtual_overseer.recv().await,
+			AllMessages::StatementDistribution(
+				StatementDistributionMessage::Share(
+					parent_hash,
+					_signed_statement,
+				)
+			) if parent_hash == leaf_a_parent => {}
+		);
+
+		assert_matches!(
+			virtual_overseer.recv().await,
+			AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => {
+				assert_eq!(leaf_a_parent, hash);
+				assert_matches!(statement.payload(), Statement::Seconded(_));
+			}
+		);
+
+		virtual_overseer
+	});
+}
+
 // Test that a seconded candidate which is not approved by prospective parachains
 // subsystem doesn't change the view.
 #[test]
@@ -712,7 +788,7 @@ fn prospective_parachains_reject_candidate() {
 		let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)];
 		let test_leaf_a = TestLeaf { activated, min_relay_parents };
 
-		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
@@ -760,25 +836,20 @@ fn prospective_parachains_reject_candidate() {
 			persisted_validation_data: pvd.clone(),
 		};
 		let expected_request_a = vec![(
-			HypotheticalFrontierRequest {
+			HypotheticalMembershipRequest {
 				candidates: vec![hypothetical_candidate.clone()],
-				fragment_tree_relay_parent: Some(leaf_a_hash),
-				backed_in_path_only: false,
+				fragment_chain_relay_parent: Some(leaf_a_hash),
 			},
-			make_hypothetical_frontier_response(
-				vec![0, 1, 2, 3],
-				hypothetical_candidate,
-				leaf_a_hash,
-			),
+			make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash),
 		)];
-		assert_hypothetical_frontier_requests(&mut virtual_overseer, expected_request_a.clone())
+		assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request_a.clone())
 			.await;
 
 		// Prospective parachains are notified.
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::ProspectiveParachains(
-				ProspectiveParachainsMessage::IntroduceCandidate(
+				ProspectiveParachainsMessage::IntroduceSecondedCandidate(
 					req,
 					tx,
 				),
@@ -787,7 +858,7 @@ fn prospective_parachains_reject_candidate() {
 				&& req.candidate_para == para_id
 				&& pvd == req.persisted_validation_data => {
 				// Reject it.
-				tx.send(Vec::new()).unwrap();
+				tx.send(false).unwrap();
 			}
 		);
 
@@ -825,12 +896,12 @@ fn prospective_parachains_reject_candidate() {
 		.await;
 
 		// `seconding_sanity_check`
-		assert_hypothetical_frontier_requests(&mut virtual_overseer, expected_request_a).await;
+		assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request_a).await;
 		// Prospective parachains are notified.
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::ProspectiveParachains(
-				ProspectiveParachainsMessage::IntroduceCandidate(
+				ProspectiveParachainsMessage::IntroduceSecondedCandidate(
 					req,
 					tx,
 				),
@@ -838,19 +909,10 @@ fn prospective_parachains_reject_candidate() {
 				req.candidate_receipt == candidate
 				&& req.candidate_para == para_id
 				&& pvd == req.persisted_validation_data => {
-				// Any non-empty response will do.
-				tx.send(vec![(leaf_a_hash, vec![0, 2, 3])]).unwrap();
+				tx.send(true).unwrap();
 			}
 		);
 
-		assert_matches!(
-			virtual_overseer.recv().await,
-			AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded(
-				_,
-				_
-			))
-		);
-
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::StatementDistribution(
@@ -890,7 +952,7 @@ fn second_multiple_candidates_per_relay_parent() {
 		let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)];
 		let test_leaf_a = TestLeaf { activated, min_relay_parents };
 
-		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
@@ -911,12 +973,10 @@ fn second_multiple_candidates_per_relay_parent() {
 		let mut candidate_b = candidate_a.clone();
 		candidate_b.relay_parent = leaf_grandparent;
 
-		// With depths.
-		let candidate_a = (candidate_a.build(), 1);
-		let candidate_b = (candidate_b.build(), 2);
+		let candidate_a = candidate_a.build();
+		let candidate_b = candidate_b.build();
 
 		for candidate in &[candidate_a, candidate_b] {
-			let (candidate, depth) = candidate;
 			let second = CandidateBackingMessage::Second(
 				leaf_hash,
 				candidate.to_plain(),
@@ -945,46 +1005,33 @@ fn second_multiple_candidates_per_relay_parent() {
 				persisted_validation_data: pvd.clone(),
 			};
 			let expected_request_a = vec![(
-				HypotheticalFrontierRequest {
+				HypotheticalMembershipRequest {
 					candidates: vec![hypothetical_candidate.clone()],
-					fragment_tree_relay_parent: Some(leaf_hash),
-					backed_in_path_only: false,
+					fragment_chain_relay_parent: Some(leaf_hash),
 				},
-				make_hypothetical_frontier_response(
-					vec![*depth],
-					hypothetical_candidate,
-					leaf_hash,
-				),
+				make_hypothetical_membership_response(hypothetical_candidate, leaf_hash),
 			)];
-			assert_hypothetical_frontier_requests(
+			assert_hypothetical_membership_requests(
 				&mut virtual_overseer,
 				expected_request_a.clone(),
 			)
 			.await;
 
 			// Prospective parachains are notified.
-			assert_matches!(
-						   virtual_overseer.recv().await,
-						   AllMessages::ProspectiveParachains(
-							   ProspectiveParachainsMessage::IntroduceCandidate(
-								   req,
-								   tx,
-							   ),
-						   ) if
-							   &req.candidate_receipt == candidate
-							   && req.candidate_para == para_id
-							   && pvd == req.persisted_validation_data
-			=> {
-							   // Any non-empty response will do.
-							   tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap();
-						   }
-					   );
-
 			assert_matches!(
 				virtual_overseer.recv().await,
 				AllMessages::ProspectiveParachains(
-					ProspectiveParachainsMessage::CandidateSeconded(_, _)
-				)
+					ProspectiveParachainsMessage::IntroduceSecondedCandidate(
+						req,
+						tx,
+					),
+				) if
+					&req.candidate_receipt == candidate
+					&& req.candidate_para == para_id
+					&& pvd == req.persisted_validation_data
+				=> {
+					tx.send(true).unwrap();
+				}
 			);
 
 			assert_matches!(
@@ -1026,7 +1073,7 @@ fn backing_works() {
 		let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)];
 		let test_leaf_a = TestLeaf { activated, min_relay_parents };
 
-		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
@@ -1048,7 +1095,6 @@ fn backing_works() {
 		.build();
 
 		let candidate_a_hash = candidate_a.hash();
-		let candidate_a_para_head = candidate_a.descriptor().para_head;
 
 		let public1 = Keystore::sr25519_generate_new(
 			&*test_state.keystore,
@@ -1096,7 +1142,7 @@ fn backing_works() {
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::ProspectiveParachains(
-				ProspectiveParachainsMessage::IntroduceCandidate(
+				ProspectiveParachainsMessage::IntroduceSecondedCandidate(
 					req,
 					tx,
 				),
@@ -1104,19 +1150,10 @@ fn backing_works() {
 				req.candidate_receipt == candidate_a
 				&& req.candidate_para == para_id
 				&& pvd == req.persisted_validation_data => {
-				// Any non-empty response will do.
-				tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap();
+				tx.send(true).unwrap();
 			}
 		);
 
-		assert_matches!(
-			virtual_overseer.recv().await,
-			AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded(
-				_,
-				_
-			))
-		);
-
 		assert_validate_seconded_candidate(
 			&mut virtual_overseer,
 			candidate_a.descriptor().relay_parent,
@@ -1147,13 +1184,6 @@ fn backing_works() {
 				),
 			) if candidate_a_hash == candidate_hash && candidate_para_id == para_id
 		);
-		assert_matches!(
-			virtual_overseer.recv().await,
-			AllMessages::CollatorProtocol(CollatorProtocolMessage::Backed {
-				para_id: _para_id,
-				para_head,
-			}) if para_id == _para_id && candidate_a_para_head == para_head
-		);
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::StatementDistribution(StatementDistributionMessage::Backed (
@@ -1187,7 +1217,7 @@ fn concurrent_dependent_candidates() {
 		let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)];
 		let test_leaf_a = TestLeaf { activated, min_relay_parents };
 
-		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await;
 
 		let head_data = &[
 			HeadData(vec![10, 20, 30]), // Before `a`.
@@ -1299,13 +1329,10 @@ fn concurrent_dependent_candidates() {
 			// Order is not guaranteed since we have 2 statements being handled concurrently.
 			match msg {
 				AllMessages::ProspectiveParachains(
-					ProspectiveParachainsMessage::IntroduceCandidate(_, tx),
+					ProspectiveParachainsMessage::IntroduceSecondedCandidate(_, tx),
 				) => {
-					tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap();
+					tx.send(true).unwrap();
 				},
-				AllMessages::ProspectiveParachains(
-					ProspectiveParachainsMessage::CandidateSeconded(_, _),
-				) => {},
 				AllMessages::RuntimeApi(RuntimeApiMessage::Request(
 					_,
 					RuntimeApiRequest::ValidationCodeByHash(_, tx),
@@ -1362,7 +1389,6 @@ fn concurrent_dependent_candidates() {
 				AllMessages::ProspectiveParachains(
 					ProspectiveParachainsMessage::CandidateBacked(..),
 				) => {},
-				AllMessages::CollatorProtocol(CollatorProtocolMessage::Backed { .. }) => {},
 				AllMessages::StatementDistribution(StatementDistributionMessage::Share(
 					_,
 					statement,
@@ -1447,7 +1473,7 @@ fn seconding_sanity_check_occupy_same_depth() {
 		let min_relay_parents = vec![(para_id_a, min_block_number), (para_id_b, min_block_number)];
 		let test_leaf_a = TestLeaf { activated, min_relay_parents };
 
-		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
@@ -1506,44 +1532,35 @@ fn seconding_sanity_check_occupy_same_depth() {
 				persisted_validation_data: pvd.clone(),
 			};
 			let expected_request_a = vec![(
-				HypotheticalFrontierRequest {
+				HypotheticalMembershipRequest {
 					candidates: vec![hypothetical_candidate.clone()],
-					fragment_tree_relay_parent: Some(leaf_hash),
-					backed_in_path_only: false,
+					fragment_chain_relay_parent: Some(leaf_hash),
 				},
 				// Send the same membership for both candidates.
-				make_hypothetical_frontier_response(vec![0, 1], hypothetical_candidate, leaf_hash),
+				make_hypothetical_membership_response(hypothetical_candidate, leaf_hash),
 			)];
 
-			assert_hypothetical_frontier_requests(
+			assert_hypothetical_membership_requests(
 				&mut virtual_overseer,
 				expected_request_a.clone(),
 			)
 			.await;
 
 			// Prospective parachains are notified.
-			assert_matches!(
-						   virtual_overseer.recv().await,
-						   AllMessages::ProspectiveParachains(
-							   ProspectiveParachainsMessage::IntroduceCandidate(
-								   req,
-								   tx,
-							   ),
-						   ) if
-							   &req.candidate_receipt == candidate
-							   && &req.candidate_para == para_id
-							   && pvd == req.persisted_validation_data
-			=> {
-							   // Any non-empty response will do.
-							   tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap();
-						   }
-					   );
-
 			assert_matches!(
 				virtual_overseer.recv().await,
 				AllMessages::ProspectiveParachains(
-					ProspectiveParachainsMessage::CandidateSeconded(_, _)
-				)
+					ProspectiveParachainsMessage::IntroduceSecondedCandidate(
+						req,
+						tx,
+					),
+				) if
+					&req.candidate_receipt == candidate
+					&& &req.candidate_para == para_id
+					&& pvd == req.persisted_validation_data
+				=> {
+					tx.send(true).unwrap();
+				}
 			);
 
 			assert_matches!(
@@ -1600,7 +1617,7 @@ fn occupied_core_assignment() {
 		let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)];
 		let test_leaf_a = TestLeaf { activated, min_relay_parents };
 
-		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
@@ -1648,23 +1665,18 @@ fn occupied_core_assignment() {
 			persisted_validation_data: pvd.clone(),
 		};
 		let expected_request = vec![(
-			HypotheticalFrontierRequest {
+			HypotheticalMembershipRequest {
 				candidates: vec![hypothetical_candidate.clone()],
-				fragment_tree_relay_parent: Some(leaf_a_hash),
-				backed_in_path_only: false,
+				fragment_chain_relay_parent: Some(leaf_a_hash),
 			},
-			make_hypothetical_frontier_response(
-				vec![0, 1, 2, 3],
-				hypothetical_candidate,
-				leaf_a_hash,
-			),
+			make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash),
 		)];
-		assert_hypothetical_frontier_requests(&mut virtual_overseer, expected_request).await;
+		assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request).await;
 		// Prospective parachains are notified.
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::ProspectiveParachains(
-				ProspectiveParachainsMessage::IntroduceCandidate(
+				ProspectiveParachainsMessage::IntroduceSecondedCandidate(
 					req,
 					tx,
 				),
@@ -1673,19 +1685,10 @@ fn occupied_core_assignment() {
 				&& req.candidate_para == para_id
 				&& pvd == req.persisted_validation_data
 			=> {
-				// Any non-empty response will do.
-				tx.send(vec![(leaf_a_hash, vec![0, 1, 2, 3])]).unwrap();
+				tx.send(true).unwrap();
 			}
 		);
 
-		assert_matches!(
-			virtual_overseer.recv().await,
-			AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded(
-				_,
-				_
-			))
-		);
-
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::StatementDistribution(
diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml
index ab3cef99e54ff6da279ec30728c308ed8bbf3da6..80cd384ae0a40f1a66a676fe61d84aa619851cda 100644
--- a/polkadot/node/core/prospective-parachains/Cargo.toml
+++ b/polkadot/node/core/prospective-parachains/Cargo.toml
@@ -23,7 +23,6 @@ polkadot-node-subsystem = { path = "../../subsystem" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util" }
 
 [dev-dependencies]
-rstest = "0.18.2"
 assert_matches = "1"
 polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
 polkadot-node-subsystem-types = { path = "../../subsystem-types" }
diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs
index 86814b976d13424281203aa5bca59b0a918b7251..f87d4820ff9af242bf28ca7170527ec72d1963b8 100644
--- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs
+++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs
@@ -14,35 +14,49 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
-//! A tree utility for managing parachain fragments not referenced by the relay-chain.
+//! Utility for managing parachain fragments not referenced by the relay-chain.
 //!
 //! # Overview
 //!
-//! This module exposes two main types: [`FragmentTree`] and [`CandidateStorage`] which are meant to
-//! be used in close conjunction. Each fragment tree is associated with a particular relay-parent
-//! and each node in the tree represents a candidate. Each parachain has a single candidate storage,
-//! but can have multiple trees for each relay chain block in the view.
+//! This module exposes two main types: [`FragmentChain`] and [`CandidateStorage`], which are
+//! meant to be used in close conjunction. Each fragment chain is associated with a particular
+//! relay-parent and each node in the chain represents a candidate. Each parachain has a single
+//! candidate storage, but can have one chain for each relay chain block in the view.
+//! Therefore, the same candidate can be present in multiple fragment chains of a parachain. One of
+//! the purposes of the candidate storage is to deduplicate the large candidate data that is being
+//! referenced from multiple fragment chains.
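+//!
+//! As a rough illustration only (the types below are placeholders, not this module's actual API),
+//! the intended layout is one shared candidate storage per para and one fragment chain per
+//! relay-chain block in the view, with each chain referencing the candidate data kept in the
+//! storage:
+//!
+//! ```rust
+//! use std::collections::HashMap;
+//!
+//! // Placeholder types standing in for the real `CandidateStorage` and `FragmentChain`.
+//! struct CandidateStorage;
+//! struct FragmentChain;
+//! type RelayBlockHash = [u8; 32];
+//!
+//! // Hypothetical per-para view: candidate data lives once in `storage`, while every active
+//! // relay-chain block gets its own chain that references candidates stored there.
+//! struct PerParaView {
+//!     storage: CandidateStorage,
+//!     chains: HashMap<RelayBlockHash, FragmentChain>,
+//! }
+//! ```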
 //!
-//! A tree has an associated [`Scope`] which defines limits on candidates within the tree.
+//! A chain has an associated [`Scope`] which defines limits on candidates within the chain.
 //! Candidates themselves have their own [`Constraints`] which are either the constraints from the
-//! scope, or, if there are previous nodes in the tree, a modified version of the previous
+//! scope, or, if there are previous nodes in the chain, a modified version of the previous
 //! candidate's constraints.
 //!
+//! Another use of the `CandidateStorage` is to keep a record of candidates which may not yet be
+//! included in any chain, but which may become part of a chain in the future. This is needed for
+//! elastic scaling, so that we may parallelise the backing process across different groups. As long
+//! as some basic constraints are not violated by an unconnected candidate (like the relay parent
+//! being in scope), we proceed with the backing process, hoping that its predecessors will be
+//! backed soon enough. Such a candidate is commonly called a potential candidate. Note that not
+//! all potential candidates will be maintained in the `CandidateStorage`. The total number of
+//! connected and potential candidates will be at most `max_candidate_depth + 1`.
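+//!
+//! As a purely illustrative sketch of this rule (the helper below is not part of this module's
+//! API), an unconnected candidate is only kept as a potential candidate if the basic checks pass
+//! and the limit of `max_candidate_depth + 1` tracked candidates is not exceeded:
+//!
+//! ```rust
+//! // `tracked_so_far` is the number of connected + potential candidates already kept for the
+//! // para, before adding the new one.
+//! fn can_keep_as_potential(
+//!     basic_constraints_hold: bool, // e.g. the candidate's relay parent is in scope
+//!     tracked_so_far: usize,
+//!     max_candidate_depth: usize,
+//! ) -> bool {
+//!     basic_constraints_hold && tracked_so_far < max_candidate_depth + 1
+//! }
+//!
+//! // With `max_candidate_depth = 3`, at most 4 candidates are tracked at any point.
+//! assert!(can_keep_as_potential(true, 3, 3));
+//! assert!(!can_keep_as_potential(true, 4, 3));
+//! ```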
+//!
 //! This module also makes use of types provided by the Inclusion Emulator module, such as
 //! [`Fragment`] and [`Constraints`]. These perform the actual job of checking for validity of
 //! prospective fragments.
 //!
-//! # Usage
+//! # Parachain forks
 //!
-//! It's expected that higher-level code will have a tree for each relay-chain block which might
-//! reasonably have blocks built upon it.
+//! Parachains are expected not to create forks, hence the use of fragment chains as opposed to
+//! fragment trees. If parachains do create forks, their performance with regard to async backing
+//! and elastic scaling will suffer, because different validators will have different views of the
+//! future.
 //!
-//! Because a para only has a single candidate storage, trees only store indices into the storage.
-//! The storage is meant to be pruned when trees are dropped by higher-level code.
+//! This is a compromise we can make: collators that want to use async backing and elastic scaling
+//! need to cooperate in order to achieve the highest throughput.
 //!
-//! # Cycles
+//! # Parachain cycles
 //!
-//! Nodes do not uniquely refer to a parachain block for two reasons.
+//! Parachains can create cycles, because:
 //!   1. There's no requirement that head-data is unique for a parachain. Furthermore, a parachain
 //!      is under no obligation to be acyclic, and this is mostly just because it's totally
 //!      inefficient to enforce it. Practical use-cases are acyclic, but there is still more than
@@ -50,34 +64,17 @@
 //!   2. and candidates only refer to their parent by its head-data. This whole issue could be
 //!      resolved by having candidates reference their parent by candidate hash.
 //!
-//! The implication is that when we receive a candidate receipt, there are actually multiple
-//! possibilities for any candidates between the para-head recorded in the relay parent's state
-//! and the candidate in question.
-//!
-//! This means that our candidates need to handle multiple parents and that depth is an
-//! attribute of a node in a tree, not a candidate. Put another way, the same candidate might
-//! have different depths in different parts of the tree.
+//! However, dealing with cycles increases complexity during the backing/inclusion process for no
+//! practical reason. Therefore, fragment chains will not accept such candidates.
 //!
-//! As an extreme example, a candidate which produces head-data which is the same as its parent
-//! can correspond to multiple nodes within the same [`FragmentTree`]. Such cycles are bounded
-//! by the maximum depth allowed by the tree. An example with `max_depth: 4`:
+//! On the other hand, enforcing that a parachain will NEVER create a cycle would be very
+//! complicated (it would require looping through the entire parachain's history on every new
+//! candidate or changing the candidate receipt to reference the parent's candidate hash).
 //!
-//! ```text
-//!           committed head
-//!                  |
-//! depth 0:      head_a
-//!                  |
-//! depth 1:      head_b
-//!                  |
-//! depth 2:      head_a
-//!                  |
-//! depth 3:      head_b
-//!                  |
-//! depth 4:      head_a
-//! ```
+//! # Spam protection
 //!
 //! As long as the [`CandidateStorage`] has bounded input on the number of candidates supplied,
-//! [`FragmentTree`] complexity is bounded. This means that higher-level code needs to be selective
+//! [`FragmentChain`] complexity is bounded. This means that higher-level code needs to be selective
 //! about limiting the amount of candidates that are considered.
 //!
 //! The code in this module is not designed for speed or efficiency, but conceptual simplicity.
@@ -90,16 +87,15 @@
 mod tests;
 
 use std::{
-	borrow::Cow,
 	collections::{
 		hash_map::{Entry, HashMap},
 		BTreeMap, HashSet,
 	},
+	sync::Arc,
 };
 
 use super::LOG_TARGET;
-use bitvec::prelude::*;
-use polkadot_node_subsystem::messages::Ancestors;
+use polkadot_node_subsystem::messages::{Ancestors, HypotheticalCandidate};
 use polkadot_node_subsystem_util::inclusion_emulator::{
 	ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo,
 };
@@ -120,11 +116,19 @@ pub enum CandidateStorageInsertionError {
 
 /// Stores candidates and information about them such as their relay-parents and their backing
 /// states.
+#[derive(Clone, Default)]
 pub(crate) struct CandidateStorage {
-	// Index from head data hash to candidate hashes with that head data as a parent.
+	// Index from head data hash to candidate hashes with that head data as a parent. Purely for
+	// efficiency when responding to `ProspectiveValidationDataRequest`s or when trying to find a
+	// new candidate to push to a chain.
+	// Even though having multiple candidates with the same parent would be invalid for a
+	// parachain, it could happen across different relay chain forks, hence the HashSet.
 	by_parent_head: HashMap<Hash, HashSet<CandidateHash>>,
 
-	// Index from head data hash to candidate hashes outputting that head data.
+	// Index from head data hash to candidate hashes outputting that head data. Purely for
+	// efficiency when responding to `ProspectiveValidationDataRequest`s.
+	// Even though having multiple candidates with the same output would be invalid for a
+	// parachain, it could happen across different relay chain forks.
 	by_output_head: HashMap<Hash, HashSet<CandidateHash>>,
 
 	// Index from candidate hash to fragment node.
@@ -132,23 +136,14 @@ pub(crate) struct CandidateStorage {
 }
 
 impl CandidateStorage {
-	/// Create a new `CandidateStorage`.
-	pub fn new() -> Self {
-		CandidateStorage {
-			by_parent_head: HashMap::new(),
-			by_output_head: HashMap::new(),
-			by_candidate_hash: HashMap::new(),
-		}
-	}
-
 	/// Introduce a new candidate.
 	pub fn add_candidate(
 		&mut self,
 		candidate: CommittedCandidateReceipt,
 		persisted_validation_data: PersistedValidationData,
+		state: CandidateState,
 	) -> Result<CandidateHash, CandidateStorageInsertionError> {
 		let candidate_hash = candidate.hash();
-
 		if self.by_candidate_hash.contains_key(&candidate_hash) {
 			return Err(CandidateStorageInsertionError::CandidateAlreadyKnown(candidate_hash))
 		}
@@ -157,24 +152,30 @@ impl CandidateStorage {
 			return Err(CandidateStorageInsertionError::PersistedValidationDataMismatch)
 		}
 
-		let parent_head_hash = persisted_validation_data.parent_head.hash();
-		let output_head_hash = candidate.commitments.head_data.hash();
 		let entry = CandidateEntry {
 			candidate_hash,
+			parent_head_data_hash: persisted_validation_data.parent_head.hash(),
+			output_head_data_hash: candidate.commitments.head_data.hash(),
 			relay_parent: candidate.descriptor.relay_parent,
-			state: CandidateState::Introduced,
-			candidate: ProspectiveCandidate {
-				commitments: Cow::Owned(candidate.commitments),
+			state,
+			candidate: Arc::new(ProspectiveCandidate {
+				commitments: candidate.commitments,
 				collator: candidate.descriptor.collator,
 				collator_signature: candidate.descriptor.signature,
 				persisted_validation_data,
 				pov_hash: candidate.descriptor.pov_hash,
 				validation_code_hash: candidate.descriptor.validation_code_hash,
-			},
+			}),
 		};
 
-		self.by_parent_head.entry(parent_head_hash).or_default().insert(candidate_hash);
-		self.by_output_head.entry(output_head_hash).or_default().insert(candidate_hash);
+		self.by_parent_head
+			.entry(entry.parent_head_data_hash())
+			.or_default()
+			.insert(candidate_hash);
+		self.by_output_head
+			.entry(entry.output_head_data_hash())
+			.or_default()
+			.insert(candidate_hash);
 		// sanity-checked already.
 		self.by_candidate_hash.insert(candidate_hash, entry);
 
@@ -184,21 +185,20 @@ impl CandidateStorage {
 	/// Remove a candidate from the store.
 	pub fn remove_candidate(&mut self, candidate_hash: &CandidateHash) {
 		if let Some(entry) = self.by_candidate_hash.remove(candidate_hash) {
-			let parent_head_hash = entry.candidate.persisted_validation_data.parent_head.hash();
-			if let Entry::Occupied(mut e) = self.by_parent_head.entry(parent_head_hash) {
+			if let Entry::Occupied(mut e) = self.by_parent_head.entry(entry.parent_head_data_hash())
+			{
 				e.get_mut().remove(&candidate_hash);
 				if e.get().is_empty() {
 					e.remove();
 				}
 			}
-		}
-	}
 
-	/// Note that an existing candidate has been seconded.
-	pub fn mark_seconded(&mut self, candidate_hash: &CandidateHash) {
-		if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) {
-			if entry.state != CandidateState::Backed {
-				entry.state = CandidateState::Seconded;
+			if let Entry::Occupied(mut e) = self.by_output_head.entry(entry.output_head_data_hash())
+			{
+				e.get_mut().remove(&candidate_hash);
+				if e.get().is_empty() {
+					e.remove();
+				}
 			}
 		}
 	}
@@ -225,6 +225,11 @@ impl CandidateStorage {
 		self.by_candidate_hash.contains_key(candidate_hash)
 	}
 
+	/// Return an iterator over the stored candidates.
+	pub fn candidates(&self) -> impl Iterator<Item = &CandidateEntry> {
+		self.by_candidate_hash.values()
+	}
+
 	/// Retain only candidates which pass the predicate.
 	pub(crate) fn retain(&mut self, pred: impl Fn(&CandidateHash) -> bool) {
 		self.by_candidate_hash.retain(|h, _v| pred(h));
@@ -260,16 +265,17 @@ impl CandidateStorage {
 	}
 
 	/// Returns candidate's relay parent, if present.
-	pub(crate) fn relay_parent_by_candidate_hash(
-		&self,
-		candidate_hash: &CandidateHash,
-	) -> Option<Hash> {
+	pub(crate) fn relay_parent_of_candidate(&self, candidate_hash: &CandidateHash) -> Option<Hash> {
 		self.by_candidate_hash.get(candidate_hash).map(|entry| entry.relay_parent)
 	}
 
-	fn iter_para_children<'a>(
+	/// Returns the candidates which have the given head data hash as parent.
+	/// We don't allow forks in a parachain, but we may have multiple candidates with the same parent
+	/// across different relay chain forks. That's why it returns an iterator (but only one will be
+	/// valid and used in the end).
+	fn possible_para_children<'a>(
 		&'a self,
-		parent_head_hash: &Hash,
+		parent_head_hash: &'a Hash,
 	) -> impl Iterator<Item = &'a CandidateEntry> + 'a {
 		let by_candidate_hash = &self.by_candidate_hash;
 		self.by_parent_head
@@ -279,10 +285,6 @@ impl CandidateStorage {
 			.filter_map(move |h| by_candidate_hash.get(h))
 	}
 
-	fn get(&'_ self, candidate_hash: &CandidateHash) -> Option<&'_ CandidateEntry> {
-		self.by_candidate_hash.get(candidate_hash)
-	}
-
 	#[cfg(test)]
 	pub fn len(&self) -> (usize, usize) {
 		(self.by_parent_head.len(), self.by_candidate_hash.len())
@@ -292,25 +294,38 @@ impl CandidateStorage {
 /// The state of a candidate.
 ///
 /// Candidates aren't even considered until they've at least been seconded.
-#[derive(Debug, PartialEq)]
-enum CandidateState {
-	/// The candidate has been introduced in a spam-protected way but
-	/// is not necessarily backed.
-	Introduced,
+#[derive(Debug, PartialEq, Clone)]
+pub(crate) enum CandidateState {
 	/// The candidate has been seconded.
 	Seconded,
 	/// The candidate has been completely backed by the group.
 	Backed,
 }
 
-#[derive(Debug)]
-struct CandidateEntry {
+#[derive(Debug, Clone)]
+pub(crate) struct CandidateEntry {
 	candidate_hash: CandidateHash,
+	parent_head_data_hash: Hash,
+	output_head_data_hash: Hash,
 	relay_parent: Hash,
-	candidate: ProspectiveCandidate<'static>,
+	candidate: Arc<ProspectiveCandidate>,
 	state: CandidateState,
 }
 
+impl CandidateEntry {
+	pub fn hash(&self) -> CandidateHash {
+		self.candidate_hash
+	}
+
+	pub fn parent_head_data_hash(&self) -> Hash {
+		self.parent_head_data_hash
+	}
+
+	pub fn output_head_data_hash(&self) -> Hash {
+		self.output_head_data_hash
+	}
+}
+
 /// A candidate existing on-chain but pending availability, for special treatment
 /// in the [`Scope`].
 #[derive(Debug, Clone)]
@@ -321,15 +336,22 @@ pub(crate) struct PendingAvailability {
 	pub relay_parent: RelayChainBlockInfo,
 }
 
-/// The scope of a [`FragmentTree`].
-#[derive(Debug)]
+/// The scope of a [`FragmentChain`].
+#[derive(Debug, Clone)]
 pub(crate) struct Scope {
+	/// The assigned para id of this `FragmentChain`.
 	para: ParaId,
+	/// The relay parent we're currently building on top of.
 	relay_parent: RelayChainBlockInfo,
+	/// The other relay parents that candidates are allowed to build upon, mapped by the block number.
 	ancestors: BTreeMap<BlockNumber, RelayChainBlockInfo>,
+	/// The other relay parents that candidates are allowed to build upon, mapped by the block hash.
 	ancestors_by_hash: HashMap<Hash, RelayChainBlockInfo>,
+	/// The candidates pending availability at this block.
 	pending_availability: Vec<PendingAvailability>,
+	/// The base constraints derived from the latest included candidate.
 	base_constraints: Constraints,
+	/// Equal to `max_candidate_depth`.
 	max_depth: usize,
 }
 
@@ -398,7 +420,7 @@ impl Scope {
 		})
 	}
 
-	/// Get the earliest relay-parent allowed in the scope of the fragment tree.
+	/// Get the earliest relay-parent allowed in the scope of the fragment chain.
 	pub fn earliest_relay_parent(&self) -> RelayChainBlockInfo {
 		self.ancestors
 			.iter()
@@ -407,8 +429,8 @@ impl Scope {
 			.unwrap_or_else(|| self.relay_parent.clone())
 	}
 
-	/// Get the ancestor of the fragment tree by hash.
-	pub fn ancestor_by_hash(&self, hash: &Hash) -> Option<RelayChainBlockInfo> {
+	/// Get the relay ancestor of the fragment chain by hash.
+	pub fn ancestor(&self, hash: &Hash) -> Option<RelayChainBlockInfo> {
 		if hash == &self.relay_parent.hash {
 			return Some(self.relay_parent.clone())
 		}
@@ -430,67 +452,48 @@ impl Scope {
 	}
 }
 
-/// We use indices into a flat vector to refer to nodes in the tree.
-/// Every tree also has an implicit root.
-#[derive(Debug, Clone, Copy, PartialEq)]
-enum NodePointer {
-	Root,
-	Storage(usize),
-}
-
-/// A hypothetical candidate, which may or may not exist in
-/// the fragment tree already.
-pub(crate) enum HypotheticalCandidate<'a> {
-	Complete {
-		receipt: Cow<'a, CommittedCandidateReceipt>,
-		persisted_validation_data: Cow<'a, PersistedValidationData>,
-	},
-	Incomplete {
-		relay_parent: Hash,
-		parent_head_data_hash: Hash,
-	},
+pub struct FragmentNode {
+	fragment: Fragment,
+	candidate_hash: CandidateHash,
+	cumulative_modifications: ConstraintModifications,
 }
 
-impl<'a> HypotheticalCandidate<'a> {
-	fn parent_head_data_hash(&self) -> Hash {
-		match *self {
-			HypotheticalCandidate::Complete { ref persisted_validation_data, .. } =>
-				persisted_validation_data.as_ref().parent_head.hash(),
-			HypotheticalCandidate::Incomplete { ref parent_head_data_hash, .. } =>
-				*parent_head_data_hash,
-		}
-	}
-
+impl FragmentNode {
 	fn relay_parent(&self) -> Hash {
-		match *self {
-			HypotheticalCandidate::Complete { ref receipt, .. } =>
-				receipt.descriptor().relay_parent,
-			HypotheticalCandidate::Incomplete { ref relay_parent, .. } => *relay_parent,
-		}
+		self.fragment.relay_parent().hash
 	}
 }
 
-/// This is a tree of candidates based on some underlying storage of candidates and a scope.
+/// Response given by `can_add_candidate_as_potential`
+#[derive(PartialEq, Debug)]
+pub enum PotentialAddition {
+	/// Can be added as either connected or unconnected candidate.
+	Anyhow,
+	/// Can only be added as a connected candidate to the chain.
+	IfConnected,
+	/// Cannot be added.
+	None,
+}
+
+/// This is a chain of candidates based on some underlying storage of candidates and a scope.
 ///
-/// All nodes in the tree must be either pending availability or within the scope. Within the scope
+/// All nodes in the chain must be either pending availability or within the scope. Within the scope
 /// means it's built off of the relay-parent or an ancestor.
-pub(crate) struct FragmentTree {
+pub(crate) struct FragmentChain {
 	scope: Scope,
 
-	// Invariant: a contiguous prefix of the 'nodes' storage will contain
-	// the top-level children.
-	nodes: Vec<FragmentNode>,
+	chain: Vec<FragmentNode>,
+
+	candidates: HashSet<CandidateHash>,
 
-	// The candidates stored in this tree, mapped to a bitvec indicating the depths
-	// where the candidate is stored.
-	candidates: HashMap<CandidateHash, BitVec<u16, Msb0>>,
+	// Index from head data hash to candidate hashes with that head data as a parent.
+	by_parent_head: HashMap<Hash, CandidateHash>,
+	// Index from head data hash to candidate hashes outputting that head data.
+	by_output_head: HashMap<Hash, CandidateHash>,
 }
 
-impl FragmentTree {
-	/// Create a new [`FragmentTree`] with given scope and populated from the storage.
-	///
-	/// Can be populated recursively (i.e. `populate` will pick up candidates that build on other
-	/// candidates).
+impl FragmentChain {
+	/// Create a new [`FragmentChain`] with given scope and populated from the storage.
 	pub fn populate(scope: Scope, storage: &CandidateStorage) -> Self {
 		gum::trace!(
 			target: LOG_TARGET,
@@ -498,285 +501,152 @@ impl FragmentTree {
 			relay_parent_num = scope.relay_parent.number,
 			para_id = ?scope.para,
 			ancestors = scope.ancestors.len(),
-			"Instantiating Fragment Tree",
+			"Instantiating Fragment Chain",
 		);
 
-		let mut tree = FragmentTree { scope, nodes: Vec::new(), candidates: HashMap::new() };
+		let mut fragment_chain = Self {
+			scope,
+			chain: Vec::new(),
+			candidates: HashSet::new(),
+			by_parent_head: HashMap::new(),
+			by_output_head: HashMap::new(),
+		};
 
-		tree.populate_from_bases(storage, vec![NodePointer::Root]);
+		fragment_chain.populate_chain(storage);
 
-		tree
+		fragment_chain
 	}
 
-	/// Get the scope of the Fragment Tree.
+	/// Get the scope of the Fragment Chain.
 	pub fn scope(&self) -> &Scope {
 		&self.scope
 	}
 
-	// Inserts a node and updates child references in a non-root parent.
-	fn insert_node(&mut self, node: FragmentNode) {
-		let pointer = NodePointer::Storage(self.nodes.len());
-		let parent_pointer = node.parent;
-		let candidate_hash = node.candidate_hash;
-
-		let max_depth = self.scope.max_depth;
-
-		self.candidates
-			.entry(candidate_hash)
-			.or_insert_with(|| bitvec![u16, Msb0; 0; max_depth + 1])
-			.set(node.depth, true);
-
-		match parent_pointer {
-			NodePointer::Storage(ptr) => {
-				self.nodes.push(node);
-				self.nodes[ptr].children.push((pointer, candidate_hash))
-			},
-			NodePointer::Root => {
-				// Maintain the invariant of node storage beginning with depth-0.
-				if self.nodes.last().map_or(true, |last| last.parent == NodePointer::Root) {
-					self.nodes.push(node);
-				} else {
-					let pos =
-						self.nodes.iter().take_while(|n| n.parent == NodePointer::Root).count();
-					self.nodes.insert(pos, node);
-				}
-			},
-		}
-	}
-
-	fn node_has_candidate_child(
-		&self,
-		pointer: NodePointer,
-		candidate_hash: &CandidateHash,
-	) -> bool {
-		self.node_candidate_child(pointer, candidate_hash).is_some()
-	}
-
-	fn node_candidate_child(
-		&self,
-		pointer: NodePointer,
-		candidate_hash: &CandidateHash,
-	) -> Option<NodePointer> {
-		match pointer {
-			NodePointer::Root => self
-				.nodes
-				.iter()
-				.take_while(|n| n.parent == NodePointer::Root)
-				.enumerate()
-				.find(|(_, n)| &n.candidate_hash == candidate_hash)
-				.map(|(i, _)| NodePointer::Storage(i)),
-			NodePointer::Storage(ptr) =>
-				self.nodes.get(ptr).and_then(|n| n.candidate_child(candidate_hash)),
-		}
+	/// Returns the number of candidates in the chain
+	pub(crate) fn len(&self) -> usize {
+		self.candidates.len()
 	}
 
-	/// Returns an O(n) iterator over the hashes of candidates contained in the
-	/// tree.
-	pub(crate) fn candidates(&self) -> impl Iterator<Item = CandidateHash> + '_ {
-		self.candidates.keys().cloned()
+	/// Whether the candidate exists.
+	pub(crate) fn contains_candidate(&self, candidate: &CandidateHash) -> bool {
+		self.candidates.contains(candidate)
 	}
 
-	/// Whether the candidate exists and at what depths.
-	pub(crate) fn candidate(&self, candidate: &CandidateHash) -> Option<Vec<usize>> {
-		self.candidates.get(candidate).map(|d| d.iter_ones().collect())
+	/// Return a vector of the chain's candidate hashes, in-order.
+	pub(crate) fn to_vec(&self) -> Vec<CandidateHash> {
+		self.chain.iter().map(|candidate| candidate.candidate_hash).collect()
 	}
 
-	/// Add a candidate and recursively populate from storage.
+	/// Try accumulating more candidates onto the chain.
 	///
-	/// Candidates can be added either as children of the root or children of other candidates.
-	pub(crate) fn add_and_populate(&mut self, hash: CandidateHash, storage: &CandidateStorage) {
-		let candidate_entry = match storage.get(&hash) {
-			None => return,
-			Some(e) => e,
-		};
-
-		let candidate_parent = &candidate_entry.candidate.persisted_validation_data.parent_head;
-
-		// Select an initial set of bases, whose required relay-parent matches that of the
-		// candidate.
-		let root_base = if &self.scope.base_constraints.required_parent == candidate_parent {
-			Some(NodePointer::Root)
-		} else {
-			None
-		};
-
-		let non_root_bases = self
-			.nodes
-			.iter()
-			.enumerate()
-			.filter(|(_, n)| {
-				n.cumulative_modifications.required_parent.as_ref() == Some(candidate_parent)
-			})
-			.map(|(i, _)| NodePointer::Storage(i));
-
-		let bases = root_base.into_iter().chain(non_root_bases).collect();
-
-		// Pass this into the population function, which will sanity-check stuff like depth,
-		// fragments, etc. and then recursively populate.
-		self.populate_from_bases(storage, bases);
+	/// Candidates can only be added if they build on the already existing chain.
+	pub(crate) fn extend_from_storage(&mut self, storage: &CandidateStorage) {
+		self.populate_chain(storage);
 	}
 
-	/// Returns `true` if the path from the root to the node's parent (inclusive)
-	/// only contains backed candidates, `false` otherwise.
-	fn path_contains_backed_only_candidates(
+	/// Returns the hypothetical state of a candidate with the given hash and parent head data
+	/// Returns the hypothetical membership of a candidate with the given hash and parent head data
+	/// with regard to the existing chain.
+	/// Returns true if either:
+	/// - the candidate is already present
+	/// - the candidate can be added to the chain
+	/// - the candidate could potentially be added to the chain in the future (its ancestors are
+	///   still unknown but it doesn't violate other rules).
+	///
+	/// If this returns false, the candidate could never be added to the current chain (not now, not
+	/// ever).
+	pub(crate) fn hypothetical_membership(
 		&self,
-		mut parent_pointer: NodePointer,
+		candidate: HypotheticalCandidate,
 		candidate_storage: &CandidateStorage,
 	) -> bool {
-		while let NodePointer::Storage(ptr) = parent_pointer {
-			let node = &self.nodes[ptr];
-			let candidate_hash = &node.candidate_hash;
-
-			if candidate_storage.get(candidate_hash).map_or(true, |candidate_entry| {
-				!matches!(candidate_entry.state, CandidateState::Backed)
-			}) {
-				return false
-			}
-			parent_pointer = node.parent;
+		let candidate_hash = candidate.candidate_hash();
+
+		// If we've already used this candidate in the chain
+		if self.candidates.contains(&candidate_hash) {
+			return true
 		}
 
-		true
-	}
+		let can_add_as_potential = self.can_add_candidate_as_potential(
+			candidate_storage,
+			&candidate.candidate_hash(),
+			&candidate.relay_parent(),
+			candidate.parent_head_data_hash(),
+			candidate.output_head_data_hash(),
+		);
 
-	/// Returns the hypothetical depths where a candidate with the given hash and parent head data
-	/// would be added to the tree, without applying other candidates recursively on top of it.
-	///
-	/// If the candidate is already known, this returns the actual depths where this
-	/// candidate is part of the tree.
-	///
-	/// Setting `backed_in_path_only` to `true` ensures this function only returns such membership
-	/// that every candidate in the path from the root is backed.
-	pub(crate) fn hypothetical_depths(
-		&self,
-		hash: CandidateHash,
-		candidate: HypotheticalCandidate,
-		candidate_storage: &CandidateStorage,
-		backed_in_path_only: bool,
-	) -> Vec<usize> {
-		// if `true`, we always have to traverse the tree.
-		if !backed_in_path_only {
-			// if known.
-			if let Some(depths) = self.candidates.get(&hash) {
-				return depths.iter_ones().collect()
-			}
+		if can_add_as_potential == PotentialAddition::None {
+			return false
 		}
 
-		// if out of scope.
-		let candidate_relay_parent = candidate.relay_parent();
-		let candidate_relay_parent = if self.scope.relay_parent.hash == candidate_relay_parent {
-			self.scope.relay_parent.clone()
-		} else if let Some(info) = self.scope.ancestors_by_hash.get(&candidate_relay_parent) {
-			info.clone()
+		let Some(candidate_relay_parent) = self.scope.ancestor(&candidate.relay_parent()) else {
+			// can_add_candidate_as_potential already checked for this, but just to be safe.
+			return false
+		};
+
+		let identity_modifications = ConstraintModifications::identity();
+		let cumulative_modifications = if let Some(last_candidate) = self.chain.last() {
+			&last_candidate.cumulative_modifications
 		} else {
-			return Vec::new()
+			&identity_modifications
 		};
 
-		let max_depth = self.scope.max_depth;
-		let mut depths = bitvec![u16, Msb0; 0; max_depth + 1];
-
-		// iterate over all nodes where parent head-data matches,
-		// relay-parent number is <= candidate, and depth < max_depth.
-		let node_pointers = (0..self.nodes.len()).map(NodePointer::Storage);
-		for parent_pointer in std::iter::once(NodePointer::Root).chain(node_pointers) {
-			let (modifications, child_depth, earliest_rp) = match parent_pointer {
-				NodePointer::Root =>
-					(ConstraintModifications::identity(), 0, self.scope.earliest_relay_parent()),
-				NodePointer::Storage(ptr) => {
-					let node = &self.nodes[ptr];
-					let parent_rp = self
-						.scope
-						.ancestor_by_hash(&node.relay_parent())
-						.or_else(|| {
-							self.scope
-								.get_pending_availability(&node.candidate_hash)
-								.map(|_| self.scope.earliest_relay_parent())
-						})
-						.expect("All nodes in tree are either pending availability or within scope; qed");
-
-					(node.cumulative_modifications.clone(), node.depth + 1, parent_rp)
+		let child_constraints =
+			match self.scope.base_constraints.apply_modifications(&cumulative_modifications) {
+				Err(e) => {
+					gum::debug!(
+						target: LOG_TARGET,
+						new_parent_head = ?cumulative_modifications.required_parent,
+						?candidate_hash,
+						err = ?e,
+						"Failed to apply modifications",
+					);
+
+					return false
 				},
+				Ok(c) => c,
 			};
 
-			if child_depth > max_depth {
-				continue
-			}
-
-			if earliest_rp.number > candidate_relay_parent.number {
-				continue
-			}
-
-			let child_constraints =
-				match self.scope.base_constraints.apply_modifications(&modifications) {
-					Err(e) => {
-						gum::debug!(
-							target: LOG_TARGET,
-							new_parent_head = ?modifications.required_parent,
-							err = ?e,
-							"Failed to apply modifications",
-						);
-
-						continue
-					},
-					Ok(c) => c,
-				};
-
-			let parent_head_hash = candidate.parent_head_data_hash();
-			if parent_head_hash != child_constraints.required_parent.hash() {
-				continue
-			}
-
+		let parent_head_hash = candidate.parent_head_data_hash();
+		if parent_head_hash == child_constraints.required_parent.hash() {
 			// We do additional checks for complete candidates.
-			if let HypotheticalCandidate::Complete { ref receipt, ref persisted_validation_data } =
-				candidate
+			if let HypotheticalCandidate::Complete {
+				ref receipt,
+				ref persisted_validation_data,
+				..
+			} = candidate
 			{
-				let prospective_candidate = ProspectiveCandidate {
-					commitments: Cow::Borrowed(&receipt.commitments),
-					collator: receipt.descriptor().collator.clone(),
-					collator_signature: receipt.descriptor().signature.clone(),
-					persisted_validation_data: persisted_validation_data.as_ref().clone(),
-					pov_hash: receipt.descriptor().pov_hash,
-					validation_code_hash: receipt.descriptor().validation_code_hash,
-				};
-
-				if Fragment::new(
-					candidate_relay_parent.clone(),
-					child_constraints,
-					prospective_candidate,
+				if Fragment::check_against_constraints(
+					&candidate_relay_parent,
+					&child_constraints,
+					&receipt.commitments,
+					&receipt.descriptor().validation_code_hash,
+					persisted_validation_data,
 				)
 				.is_err()
 				{
-					continue
+					gum::debug!(
+						target: LOG_TARGET,
+						"Fragment::check_against_constraints() returned error",
+					);
+					return false
 				}
 			}
 
-			// Check that the path only contains backed candidates, if necessary.
-			if !backed_in_path_only ||
-				self.path_contains_backed_only_candidates(parent_pointer, candidate_storage)
-			{
-				depths.set(child_depth, true);
-			}
+			// If we got this far, it can be added to the chain right now.
+			true
+		} else if can_add_as_potential == PotentialAddition::Anyhow {
+			// Otherwise it is or can be an unconnected candidate, but only if PotentialAddition
+			// does not force us to only add a connected candidate.
+			true
+		} else {
+			false
 		}
-
-		depths.iter_ones().collect()
 	}
 
 	/// Select `count` candidates after the given `ancestors` which pass
 	/// the predicate and have not already been backed on chain.
 	///
-	/// Does an exhaustive search into the tree after traversing the ancestors path.
-	/// If the ancestors draw out a path that can be traversed in multiple ways, no
-	/// candidates will be returned.
-	/// If the ancestors do not draw out a full path (the path contains holes), candidates will be
-	/// suggested that may fill these holes.
-	/// If the ancestors don't draw out a valid path, no candidates will be returned. If there are
-	/// multiple possibilities of the same size, this will select the first one. If there is no
-	/// chain of size `count` that matches the criteria, this will return the largest chain it could
-	/// find with the criteria. If there are no candidates meeting those criteria, returns an empty
-	/// `Vec`.
-	/// Cycles are accepted, but this code expects that the runtime will deduplicate
-	/// identical candidates when occupying the cores (when proposing to back A->B->A, only A will
-	/// be backed on chain).
-	///
 	/// The intention of the `ancestors` is to allow queries on the basis of
 	/// one or more candidates which were previously pending availability becoming
 	/// available or candidates timing out.
@@ -789,362 +659,334 @@ impl FragmentTree {
 		if count == 0 {
 			return vec![]
 		}
-		// First, we need to order the ancestors.
-		// The node returned is the one from which we can start finding new backable candidates.
-		let Some(base_node) = self.find_ancestor_path(ancestors) else { return vec![] };
-
-		self.find_backable_chain_inner(
-			base_node,
-			count,
-			count,
-			&pred,
-			&mut Vec::with_capacity(count as usize),
-		)
-	}
+		let base_pos = self.find_ancestor_path(ancestors);
 
-	// Try finding a candidate chain starting from `base_node` of length `expected_count`.
-	// If not possible, return the longest one we could find.
-	// Does a depth-first search, since we're optimistic that there won't be more than one such
-	// chains (parachains shouldn't usually have forks). So in the usual case, this will conclude
-	// in `O(expected_count)`.
-	// Cycles are accepted, but this doesn't allow for infinite execution time, because the maximum
-	// depth we'll reach is `expected_count`.
-	//
-	// Worst case performance is `O(num_forks ^ expected_count)`, the same as populating the tree.
-	// Although an exponential function, this is actually a constant that can only be altered via
-	// sudo/governance, because:
-	// 1. `num_forks` at a given level is at most `max_candidate_depth * max_validators_per_core`
-	//    (because each validator in the assigned group can second `max_candidate_depth`
-	//    candidates). The prospective-parachains subsystem assumes that the number of para forks is
-	//    limited by collator-protocol and backing subsystems. In practice, this is a constant which
-	//    can only be altered by sudo or governance.
-	// 2. `expected_count` is equal to the number of cores a para is scheduled on (in an elastic
-	//    scaling scenario). For non-elastic-scaling, this is just 1. In practice, this should be a
-	//    small number (1-3), capped by the total number of available cores (a constant alterable
-	//    only via governance/sudo).
-	fn find_backable_chain_inner(
-		&self,
-		base_node: NodePointer,
-		expected_count: u32,
-		remaining_count: u32,
-		pred: &dyn Fn(&CandidateHash) -> bool,
-		accumulator: &mut Vec<CandidateHash>,
-	) -> Vec<CandidateHash> {
-		if remaining_count == 0 {
-			// The best option is the chain we've accumulated so far.
-			return accumulator.to_vec();
+		let actual_end_index = std::cmp::min(base_pos + (count as usize), self.chain.len());
+		let mut res = Vec::with_capacity(actual_end_index - base_pos);
+
+		for elem in &self.chain[base_pos..actual_end_index] {
+			if self.scope.get_pending_availability(&elem.candidate_hash).is_none() &&
+				pred(&elem.candidate_hash)
+			{
+				res.push(elem.candidate_hash);
+			} else {
+				break
+			}
 		}
 
-		let children: Vec<_> = match base_node {
-			NodePointer::Root => self
-				.nodes
-				.iter()
-				.enumerate()
-				.take_while(|(_, n)| n.parent == NodePointer::Root)
-				.filter(|(_, n)| self.scope.get_pending_availability(&n.candidate_hash).is_none())
-				.filter(|(_, n)| pred(&n.candidate_hash))
-				.map(|(ptr, n)| (NodePointer::Storage(ptr), n.candidate_hash))
-				.collect(),
-			NodePointer::Storage(base_node_ptr) => {
-				let base_node = &self.nodes[base_node_ptr];
-
-				base_node
-					.children
-					.iter()
-					.filter(|(_, hash)| self.scope.get_pending_availability(&hash).is_none())
-					.filter(|(_, hash)| pred(&hash))
-					.map(|(ptr, hash)| (*ptr, *hash))
-					.collect()
-			},
-		};
+		res
+	}
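// A minimal, self-contained sketch of the selection loop above, with candidate hashes
// modelled as `u64` and a `pending` set standing in for `Scope::get_pending_availability`.
// The helper and its names are illustrative, not part of the crate's API: it takes at most
// `count` candidates after `base_pos` and stops at the first one that is pending
// availability or fails the predicate, so the returned suffix stays contiguous.
use std::collections::HashSet;

fn backable_suffix(
	chain: &[u64],
	base_pos: usize,
	count: usize,
	pending: &HashSet<u64>,
	pred: impl Fn(&u64) -> bool,
) -> Vec<u64> {
	let end = std::cmp::min(base_pos + count, chain.len());
	let mut res = Vec::with_capacity(end.saturating_sub(base_pos));
	for candidate in &chain[base_pos..end] {
		if !pending.contains(candidate) && pred(candidate) {
			res.push(*candidate);
		} else {
			// Contiguity matters: stop at the first candidate we cannot return.
			break
		}
	}
	res
}

fn main() {
	let chain = vec![1, 2, 3, 4];
	let pending: HashSet<u64> = [3].into_iter().collect();
	// Starting right after one already-used ancestor, candidate 2 is backable,
	// but the walk stops at the pending candidate 3.
	assert_eq!(backable_suffix(&chain, 1, 3, &pending, |_| true), vec![2]);
}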
+
+	// Tries to order the ancestors into a viable path from root to the last one.
+	// Stops when the ancestors are all used or when a node in the chain is not present in the
+	// ancestor set. Returns the index in the chain where the search stopped.
+	fn find_ancestor_path(&self, mut ancestors: Ancestors) -> usize {
+		if self.chain.is_empty() {
+			return 0;
+		}
 
-		let mut best_result = accumulator.clone();
-		for (child_ptr, child_hash) in children {
-			accumulator.push(child_hash);
-
-			let result = self.find_backable_chain_inner(
-				child_ptr,
-				expected_count,
-				remaining_count - 1,
-				&pred,
-				accumulator,
-			);
-
-			accumulator.pop();
-
-			// Short-circuit the search if we've found the right length. Otherwise, we'll
-			// search for a max.
-			// Taking the first best selection doesn't introduce bias or become gameable,
-			// because `find_ancestor_path` uses a `HashSet` to track the ancestors, which
-			// makes the order in which ancestors are visited non-deterministic.
-			if result.len() == expected_count as usize {
-				return result
-			} else if best_result.len() < result.len() {
-				best_result = result;
+		for (index, candidate) in self.chain.iter().enumerate() {
+			if !ancestors.remove(&candidate.candidate_hash) {
+				return index
 			}
 		}
 
-		best_result
+		// This means that we found the entire chain in the ancestor set. There won't be anything
+		// left to back.
+		self.chain.len()
+	}
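// A minimal, self-contained model of `find_ancestor_path`, with candidate hashes as `u64`
// stand-ins for `CandidateHash`: walk the chain from its start, consume matching ancestors,
// and stop at the first link that is not in the ancestor set. The helper name is illustrative.
use std::collections::HashSet;

fn ancestor_path_end(chain: &[u64], mut ancestors: HashSet<u64>) -> usize {
	if chain.is_empty() {
		return 0
	}
	for (index, candidate) in chain.iter().enumerate() {
		if !ancestors.remove(candidate) {
			return index
		}
	}
	// The entire chain is in the ancestor set; there is nothing left to back.
	chain.len()
}

fn main() {
	let chain = vec![10, 20, 30];
	// The first two links were already backed on chain or became available.
	let ancestors: HashSet<u64> = [10, 20].into_iter().collect();
	assert_eq!(ancestor_path_end(&chain, ancestors), 2);
	// A hole in the ancestor set stops the search immediately.
	let ancestors: HashSet<u64> = [20].into_iter().collect();
	assert_eq!(ancestor_path_end(&chain, ancestors), 0);
}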
+
+	// Return the earliest relay parent a new candidate can have in order to be added to the chain.
+	// This is the relay parent of the last candidate in the chain.
+	// The value returned may not be valid if we want to add a candidate pending availability, which
+	// may have a relay parent that is out of scope. Special handling is needed in that case.
+	// `None` is returned if the candidate's relay parent info cannot be found.
+	fn earliest_relay_parent(&self) -> Option<RelayChainBlockInfo> {
+		if let Some(last_candidate) = self.chain.last() {
+			self.scope.ancestor(&last_candidate.relay_parent()).or_else(|| {
+				// if the relay-parent is out of scope _and_ it is in the chain,
+				// it must be a candidate pending availability.
+				self.scope
+					.get_pending_availability(&last_candidate.candidate_hash)
+					.map(|c| c.relay_parent.clone())
+			})
+		} else {
+			Some(self.scope.earliest_relay_parent())
+		}
+	}
+
+	// Checks whether this candidate could be added to this chain now or in the future.
+	// This assumes that the chain does not already contain this candidate. It may or may not be
+	// present in the `CandidateStorage`.
+	// Even if the candidate is a valid potential candidate, this function will only indicate that
+	// it can be kept if there's enough room for it in the chain.
+	pub(crate) fn can_add_candidate_as_potential(
+		&self,
+		storage: &CandidateStorage,
+		candidate_hash: &CandidateHash,
+		relay_parent: &Hash,
+		parent_head_hash: Hash,
+		output_head_hash: Option<Hash>,
+	) -> PotentialAddition {
+		// If we've got enough candidates for the configured depth, no point in adding more.
+		if self.chain.len() > self.scope.max_depth {
+			return PotentialAddition::None
+		}
+
+		if !self.check_potential(relay_parent, parent_head_hash, output_head_hash) {
+			return PotentialAddition::None
+		}
+
+		let present_in_storage = storage.contains(candidate_hash);
+
+		let unconnected = self
+			.find_unconnected_potential_candidates(
+				storage,
+				present_in_storage.then_some(candidate_hash),
+			)
+			.len();
+
+		if (self.chain.len() + unconnected) < self.scope.max_depth {
+			PotentialAddition::Anyhow
+		} else if (self.chain.len() + unconnected) == self.scope.max_depth {
+			// If we've only one slot left to fill, it must be filled with a connected candidate.
+			PotentialAddition::IfConnected
+		} else {
+			PotentialAddition::None
+		}
 	}
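// A minimal model of the capacity arithmetic above only: the fork/cycle and relay-parent
// checks (`check_potential`) and the counting of unconnected candidates from storage are
// assumed to have happened already. With `max_depth` as the configured depth, an unconnected
// candidate may be kept while spare room remains; the last free slot is reserved for a
// candidate that connects directly to the chain. The enum mirrors `PotentialAddition` above;
// the `classify` helper is illustrative, not part of the crate's API.
#[derive(Debug, PartialEq)]
enum Potential {
	Anyhow,
	IfConnected,
	None,
}

fn classify(chain_len: usize, unconnected: usize, max_depth: usize) -> Potential {
	if chain_len > max_depth {
		return Potential::None
	}
	if chain_len + unconnected < max_depth {
		Potential::Anyhow
	} else if chain_len + unconnected == max_depth {
		// Only one slot left: it must be filled by a connected candidate.
		Potential::IfConnected
	} else {
		Potential::None
	}
}

fn main() {
	// max_depth of 4: plenty of room, the candidate may stay unconnected for now.
	assert_eq!(classify(1, 1, 4), Potential::Anyhow);
	// Exactly one slot left: only a connected candidate may take it.
	assert_eq!(classify(2, 2, 4), Potential::IfConnected);
	// No room at all.
	assert_eq!(classify(3, 2, 4), Potential::None);
}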
 
-	// Orders the ancestors into a viable path from root to the last one.
-	// Returns a pointer to the last node in the path.
-	// We assume that the ancestors form a chain (that the
-	// av-cores do not back parachain forks), None is returned otherwise.
-	// If we cannot use all ancestors, stop at the first found hole in the chain. This usually
-	// translates to a timed out candidate.
-	fn find_ancestor_path(&self, mut ancestors: Ancestors) -> Option<NodePointer> {
-		// The number of elements in the path we've processed so far.
-		let mut depth = 0;
-		let mut last_node = NodePointer::Root;
-		let mut next_node: Option<NodePointer> = Some(NodePointer::Root);
-
-		while let Some(node) = next_node {
-			if depth > self.scope.max_depth {
-				return None;
+	// Returns the candidates which are present in `CandidateStorage` and are not part of this chain
+	// but could become part of it in the future. Capped at the max depth minus the existing chain
+	// length.
+	// If `ignore_candidate` is supplied and found in storage, it won't be counted.
+	pub(crate) fn find_unconnected_potential_candidates(
+		&self,
+		storage: &CandidateStorage,
+		ignore_candidate: Option<&CandidateHash>,
+	) -> Vec<CandidateHash> {
+		let mut candidates = vec![];
+		for candidate in storage.candidates() {
+			if let Some(ignore_candidate) = ignore_candidate {
+				if ignore_candidate == &candidate.candidate_hash {
+					continue
+				}
+			}
+			// We stop at max_depth + 1 with the search. There's no point in looping further.
+			if (self.chain.len() + candidates.len()) > self.scope.max_depth {
+				break
+			}
+			if !self.candidates.contains(&candidate.candidate_hash) &&
+				self.check_potential(
+					&candidate.relay_parent,
+					candidate.candidate.persisted_validation_data.parent_head.hash(),
+					Some(candidate.candidate.commitments.head_data.hash()),
+				) {
+				candidates.push(candidate.candidate_hash);
 			}
+		}
+
+		candidates
+	}
 
-			last_node = node;
+	// Check if adding a candidate which transitions `parent_head_hash` to `output_head_hash` would
+	// introduce a fork or a cycle in the parachain.
+	// `output_head_hash` is optional because we sometimes make this check before retrieving the
+	// collation.
+	fn is_fork_or_cycle(&self, parent_head_hash: Hash, output_head_hash: Option<Hash>) -> bool {
+		if self.by_parent_head.contains_key(&parent_head_hash) {
+			// fork. our parent has another child already
+			return true
+		}
 
-			next_node = match node {
-				NodePointer::Root => {
-					let children = self
-						.nodes
-						.iter()
-						.enumerate()
-						.take_while(|n| n.1.parent == NodePointer::Root)
-						.map(|(index, node)| (NodePointer::Storage(index), node.candidate_hash))
-						.collect::<Vec<_>>();
+		if let Some(output_head_hash) = output_head_hash {
+			if self.by_output_head.contains_key(&output_head_hash) {
+				// this is not a chain, there are multiple paths to the same state.
+				return true
+			}
 
-					self.find_valid_child(&mut ancestors, children.iter()).ok()?
-				},
-				NodePointer::Storage(ptr) => {
-					let children = self.nodes.get(ptr).and_then(|n| Some(n.children.iter()));
-					if let Some(children) = children {
-						self.find_valid_child(&mut ancestors, children).ok()?
-					} else {
-						None
-					}
-				},
-			};
+			// trivial 0-length cycle.
+			if parent_head_hash == output_head_hash {
+				return true
+			}
 
-			depth += 1;
+			// this should catch any other cycles. our output state cannot already be the parent
+			// state of another candidate, unless this is a cycle, since the already added
+			// candidates form a chain.
+			if self.by_parent_head.contains_key(&output_head_hash) {
+				return true
+			}
 		}
 
-		Some(last_node)
+		false
 	}
 
-	// Find a node from the given iterator which is present in the ancestors
-	// collection. If there are multiple such nodes, return an error and log a warning. We don't
-	// accept forks in a parachain to be backed. The supplied ancestors should all form a chain.
-	// If there is no such node, return None.
-	fn find_valid_child<'a>(
+	// Checks the potential of a candidate to be added to the chain in the future.
+	// Verifies that the relay parent is in scope and not moving backwards and that we're not
+	// introducing forks or cycles with other candidates in the chain.
+	// `output_head_hash` is optional because we sometimes make this check before retrieving the
+	// collation.
+	fn check_potential(
 		&self,
-		ancestors: &'a mut Ancestors,
-		nodes: impl Iterator<Item = &'a (NodePointer, CandidateHash)> + 'a,
-	) -> Result<Option<NodePointer>, ()> {
-		let mut possible_children =
-			nodes.filter_map(|(node_ptr, hash)| match ancestors.remove(&hash) {
-				true => Some(node_ptr),
-				false => None,
-			});
-
-		// We don't accept forks in a parachain to be backed. The supplied ancestors
-		// should all form a chain.
-		let next = possible_children.next();
-		if let Some(second_child) = possible_children.next() {
-			if let (Some(NodePointer::Storage(first_child)), NodePointer::Storage(second_child)) =
-				(next, second_child)
-			{
-				gum::error!(
-					target: LOG_TARGET,
-					para_id = ?self.scope.para,
-					relay_parent = ?self.scope.relay_parent,
-					"Trying to find new backable candidates for a parachain for which we've backed a fork.\
-					This is a bug and the runtime should not have allowed it.\n\
-					Backed candidates with the same parent: {}, {}",
-					self.nodes[*first_child].candidate_hash,
-					self.nodes[*second_child].candidate_hash,
-				);
-			}
+		relay_parent: &Hash,
+		parent_head_hash: Hash,
+		output_head_hash: Option<Hash>,
+	) -> bool {
+		if self.is_fork_or_cycle(parent_head_hash, output_head_hash) {
+			return false
+		}
 
-			Err(())
-		} else {
-			Ok(next.copied())
+		let Some(earliest_rp) = self.earliest_relay_parent() else { return false };
+
+		let Some(relay_parent) = self.scope.ancestor(relay_parent) else { return false };
+
+		if relay_parent.number < earliest_rp.number {
+			return false // relay parent moved backwards.
 		}
+
+		true
 	}
 
-	fn populate_from_bases(&mut self, storage: &CandidateStorage, initial_bases: Vec<NodePointer>) {
-		// Populate the tree breadth-first.
-		let mut last_sweep_start = None;
+	// Populates the fragment chain with candidates from `CandidateStorage`.
+	// Can be called by the constructor or when introducing a new candidate.
+	// If we're introducing a new candidate onto an existing chain, we may introduce more than one,
+	// since we may connect already existing candidates to the chain.
+	fn populate_chain(&mut self, storage: &CandidateStorage) {
+		let mut cumulative_modifications = if let Some(last_candidate) = self.chain.last() {
+			last_candidate.cumulative_modifications.clone()
+		} else {
+			ConstraintModifications::identity()
+		};
+		let Some(mut earliest_rp) = self.earliest_relay_parent() else { return };
 
 		loop {
-			let sweep_start = self.nodes.len();
-
-			if Some(sweep_start) == last_sweep_start {
-				break
+			if self.chain.len() > self.scope.max_depth {
+				break;
 			}
 
-			let parents: Vec<NodePointer> = if let Some(last_start) = last_sweep_start {
-				(last_start..self.nodes.len()).map(NodePointer::Storage).collect()
-			} else {
-				initial_bases.clone()
-			};
+			let child_constraints =
+				match self.scope.base_constraints.apply_modifications(&cumulative_modifications) {
+					Err(e) => {
+						gum::debug!(
+							target: LOG_TARGET,
+							new_parent_head = ?cumulative_modifications.required_parent,
+							err = ?e,
+							"Failed to apply modifications",
+						);
 
-			// 1. get parent head and find constraints
-			// 2. iterate all candidates building on the right head and viable relay parent
-			// 3. add new node
-			for parent_pointer in parents {
-				let (modifications, child_depth, earliest_rp) = match parent_pointer {
-					NodePointer::Root =>
-						(ConstraintModifications::identity(), 0, self.scope.earliest_relay_parent()),
-					NodePointer::Storage(ptr) => {
-						let node = &self.nodes[ptr];
-						let parent_rp = self
-							.scope
-							.ancestor_by_hash(&node.relay_parent())
-							.or_else(|| {
-								// if the relay-parent is out of scope _and_ it is in the tree,
-								// it must be a candidate pending availability.
-								self.scope
-									.get_pending_availability(&node.candidate_hash)
-									.map(|c| c.relay_parent.clone())
-							})
-							.expect("All nodes in tree are either pending availability or within scope; qed");
-
-						(node.cumulative_modifications.clone(), node.depth + 1, parent_rp)
+						break
 					},
+					Ok(c) => c,
 				};
 
-				if child_depth > self.scope.max_depth {
+			let required_head_hash = child_constraints.required_parent.hash();
+			// Even though we don't allow parachain forks under the same active leaf, they may still
+			// appear under different relay chain forks, hence the iterator below.
+			let possible_children = storage.possible_para_children(&required_head_hash);
+			let mut added_child = false;
+			for candidate in possible_children {
+				// Add one node to chain if
+				// 1. it does not introduce a fork or a cycle.
+				// 2. parent hash is correct.
+				// 3. relay-parent does not move backwards.
+				// 4. all non-pending-availability candidates have relay-parent in scope.
+				// 5. candidate outputs fulfill constraints
+
+				if self.is_fork_or_cycle(
+					candidate.parent_head_data_hash(),
+					Some(candidate.output_head_data_hash()),
+				) {
 					continue
 				}
 
-				let child_constraints =
-					match self.scope.base_constraints.apply_modifications(&modifications) {
+				let pending = self.scope.get_pending_availability(&candidate.candidate_hash);
+				let Some(relay_parent) = pending
+					.map(|p| p.relay_parent.clone())
+					.or_else(|| self.scope.ancestor(&candidate.relay_parent))
+				else {
+					continue
+				};
+
+				// require: candidates don't move backwards
+				// and only pending availability candidates can be out-of-scope.
+				//
+				// earliest_rp can be before the earliest relay parent in the scope
+				// when the parent is a pending availability candidate as well, but
+				// only other pending candidates can have a relay parent out of scope.
+				let min_relay_parent_number = pending
+					.map(|p| match self.chain.len() {
+						0 => p.relay_parent.number,
+						_ => earliest_rp.number,
+					})
+					.unwrap_or_else(|| earliest_rp.number);
+
+				if relay_parent.number < min_relay_parent_number {
+					continue // relay parent moved backwards.
+				}
+
+				// don't add candidates if they're already present in the chain.
+				// this can never happen, as candidates can only be duplicated if there's a cycle
+				// and we shouldn't have allowed for a cycle to be chained.
+				if self.contains_candidate(&candidate.candidate_hash) {
+					continue
+				}
+
+				let fragment = {
+					let mut constraints = child_constraints.clone();
+					if let Some(ref p) = pending {
+						// overwrite for candidates pending availability as a special-case.
+						constraints.min_relay_parent_number = p.relay_parent.number;
+					}
+
+					let f = Fragment::new(
+						relay_parent.clone(),
+						constraints,
+						// It's cheap to clone because it's wrapped in an Arc
+						candidate.candidate.clone(),
+					);
+
+					match f {
+						Ok(f) => f,
 						Err(e) => {
 							gum::debug!(
 								target: LOG_TARGET,
-								new_parent_head = ?modifications.required_parent,
 								err = ?e,
-								"Failed to apply modifications",
+								?relay_parent,
+								candidate_hash = ?candidate.candidate_hash,
+								"Failed to instantiate fragment",
 							);
 
-							continue
+							break
 						},
-						Ok(c) => c,
-					};
-
-				// Add nodes to tree wherever
-				// 1. parent hash is correct
-				// 2. relay-parent does not move backwards.
-				// 3. all non-pending-availability candidates have relay-parent in scope.
-				// 4. candidate outputs fulfill constraints
-				let required_head_hash = child_constraints.required_parent.hash();
-				for candidate in storage.iter_para_children(&required_head_hash) {
-					let pending = self.scope.get_pending_availability(&candidate.candidate_hash);
-					let relay_parent = pending
-						.map(|p| p.relay_parent.clone())
-						.or_else(|| self.scope.ancestor_by_hash(&candidate.relay_parent));
-
-					let relay_parent = match relay_parent {
-						Some(r) => r,
-						None => continue,
-					};
-
-					// require: pending availability candidates don't move backwards
-					// and only those can be out-of-scope.
-					//
-					// earliest_rp can be before the earliest relay parent in the scope
-					// when the parent is a pending availability candidate as well, but
-					// only other pending candidates can have a relay parent out of scope.
-					let min_relay_parent_number = pending
-						.map(|p| match parent_pointer {
-							NodePointer::Root => p.relay_parent.number,
-							NodePointer::Storage(_) => earliest_rp.number,
-						})
-						.unwrap_or_else(|| {
-							std::cmp::max(
-								earliest_rp.number,
-								self.scope.earliest_relay_parent().number,
-							)
-						});
-
-					if relay_parent.number < min_relay_parent_number {
-						continue // relay parent moved backwards.
 					}
+				};
 
-					// don't add candidates where the parent already has it as a child.
-					if self.node_has_candidate_child(parent_pointer, &candidate.candidate_hash) {
-						continue
-					}
+				// Update the cumulative constraint modifications.
+				cumulative_modifications.stack(fragment.constraint_modifications());
+				// Update the earliest rp
+				earliest_rp = relay_parent;
 
-					let fragment = {
-						let mut constraints = child_constraints.clone();
-						if let Some(ref p) = pending {
-							// overwrite for candidates pending availability as a special-case.
-							constraints.min_relay_parent_number = p.relay_parent.number;
-						}
-
-						let f = Fragment::new(
-							relay_parent.clone(),
-							constraints,
-							candidate.candidate.partial_clone(),
-						);
+				let node = FragmentNode {
+					fragment,
+					candidate_hash: candidate.candidate_hash,
+					cumulative_modifications: cumulative_modifications.clone(),
+				};
 
-						match f {
-							Ok(f) => f.into_owned(),
-							Err(e) => {
-								gum::debug!(
-									target: LOG_TARGET,
-									err = ?e,
-									?relay_parent,
-									candidate_hash = ?candidate.candidate_hash,
-									"Failed to instantiate fragment",
-								);
-
-								continue
-							},
-						}
-					};
-
-					let mut cumulative_modifications = modifications.clone();
-					cumulative_modifications.stack(fragment.constraint_modifications());
-
-					let node = FragmentNode {
-						parent: parent_pointer,
-						fragment,
-						candidate_hash: candidate.candidate_hash,
-						depth: child_depth,
-						cumulative_modifications,
-						children: Vec::new(),
-					};
-
-					self.insert_node(node);
-				}
+				self.chain.push(node);
+				self.candidates.insert(candidate.candidate_hash);
+				// We've already checked for forks and cycles.
+				self.by_parent_head
+					.insert(candidate.parent_head_data_hash(), candidate.candidate_hash);
+				self.by_output_head
+					.insert(candidate.output_head_data_hash(), candidate.candidate_hash);
+				added_child = true;
+				// We can only add one child for a candidate (it's a chain, not a tree).
+				break;
 			}
 
-			last_sweep_start = Some(sweep_start);
+			if !added_child {
+				break
+			}
 		}
 	}
 }
-
-struct FragmentNode {
-	// A pointer to the parent node.
-	parent: NodePointer,
-	fragment: Fragment<'static>,
-	candidate_hash: CandidateHash,
-	depth: usize,
-	cumulative_modifications: ConstraintModifications,
-	children: Vec<(NodePointer, CandidateHash)>,
-}
-
-impl FragmentNode {
-	fn relay_parent(&self) -> Hash {
-		self.fragment.relay_parent().hash
-	}
-
-	fn candidate_child(&self, candidate_hash: &CandidateHash) -> Option<NodePointer> {
-		self.children.iter().find(|(_, c)| c == candidate_hash).map(|(p, _)| *p)
-	}
-}
diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs
index fd41be55f7f960367ee0a57ccd4a4c2251b90d13..26ee94d59d8ebd50bda29218c85b38476f077111 100644
--- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs
+++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs
@@ -19,17 +19,6 @@ use assert_matches::assert_matches;
 use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations;
 use polkadot_primitives::{BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData};
 use polkadot_primitives_test_helpers as test_helpers;
-use rstest::rstest;
-use std::iter;
-
-impl NodePointer {
-	fn unwrap_idx(self) -> usize {
-		match self {
-			NodePointer::Root => panic!("Unexpected root"),
-			NodePointer::Storage(index) => index,
-		}
-	}
-}
 
 fn make_constraints(
 	min_relay_parent_number: BlockNumber,
@@ -204,8 +193,52 @@ fn scope_only_takes_ancestors_up_to_min() {
 }
 
 #[test]
-fn storage_add_candidate() {
-	let mut storage = CandidateStorage::new();
+fn scope_rejects_unordered_ancestors() {
+	let para_id = ParaId::from(5u32);
+	let relay_parent = RelayChainBlockInfo {
+		number: 5,
+		hash: Hash::repeat_byte(0),
+		storage_root: Hash::repeat_byte(69),
+	};
+
+	let ancestors = vec![
+		RelayChainBlockInfo {
+			number: 4,
+			hash: Hash::repeat_byte(4),
+			storage_root: Hash::repeat_byte(69),
+		},
+		RelayChainBlockInfo {
+			number: 2,
+			hash: Hash::repeat_byte(2),
+			storage_root: Hash::repeat_byte(69),
+		},
+		RelayChainBlockInfo {
+			number: 3,
+			hash: Hash::repeat_byte(3),
+			storage_root: Hash::repeat_byte(69),
+		},
+	];
+
+	let max_depth = 2;
+	let base_constraints = make_constraints(0, vec![2], vec![1, 2, 3].into());
+	let pending_availability = Vec::new();
+
+	assert_matches!(
+		Scope::with_ancestors(
+			para_id,
+			relay_parent,
+			base_constraints,
+			pending_availability,
+			max_depth,
+			ancestors,
+		),
+		Err(UnexpectedAncestor { number: 2, prev: 4 })
+	);
+}
+
+#[test]
+fn candidate_storage_methods() {
+	let mut storage = CandidateStorage::default();
 	let relay_parent = Hash::repeat_byte(69);
 
 	let (pvd, candidate) = make_committed_candidate(
@@ -220,50 +253,105 @@ fn storage_add_candidate() {
 	let candidate_hash = candidate.hash();
 	let parent_head_hash = pvd.parent_head.hash();
 
-	storage.add_candidate(candidate, pvd).unwrap();
+	// Invalid pvd hash
+	let mut wrong_pvd = pvd.clone();
+	wrong_pvd.max_pov_size = 0;
+	assert_matches!(
+		storage.add_candidate(candidate.clone(), wrong_pvd, CandidateState::Seconded),
+		Err(CandidateStorageInsertionError::PersistedValidationDataMismatch)
+	);
+	assert!(!storage.contains(&candidate_hash));
+	assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 0);
+	assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), None);
+	assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head), None);
+	assert_eq!(storage.head_data_by_hash(&parent_head_hash), None);
+	assert_eq!(storage.is_backed(&candidate_hash), false);
+
+	// Add a valid candidate
+	storage
+		.add_candidate(candidate.clone(), pvd.clone(), CandidateState::Seconded)
+		.unwrap();
 	assert!(storage.contains(&candidate_hash));
-	assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1);
-
-	assert_eq!(storage.relay_parent_by_candidate_hash(&candidate_hash), Some(relay_parent));
-}
+	assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 1);
+	assert_eq!(storage.possible_para_children(&candidate.descriptor.para_head).count(), 0);
+	assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), Some(relay_parent));
+	assert_eq!(
+		storage.head_data_by_hash(&candidate.descriptor.para_head).unwrap(),
+		&candidate.commitments.head_data
+	);
+	assert_eq!(storage.head_data_by_hash(&parent_head_hash).unwrap(), &pvd.parent_head);
+	assert_eq!(storage.is_backed(&candidate_hash), false);
 
-#[test]
-fn storage_retain() {
-	let mut storage = CandidateStorage::new();
+	storage.mark_backed(&candidate_hash);
+	assert_eq!(storage.is_backed(&candidate_hash), true);
 
-	let (pvd, candidate) = make_committed_candidate(
-		ParaId::from(5u32),
-		Hash::repeat_byte(69),
-		8,
-		vec![4, 5, 6].into(),
-		vec![1, 2, 3].into(),
-		7,
+	// Re-adding a candidate fails.
+	assert_matches!(
+		storage.add_candidate(candidate.clone(), pvd.clone(), CandidateState::Seconded),
+		Err(CandidateStorageInsertionError::CandidateAlreadyKnown(hash)) if candidate_hash == hash
 	);
 
-	let candidate_hash = candidate.hash();
-	let output_head_hash = candidate.commitments.head_data.hash();
-	let parent_head_hash = pvd.parent_head.hash();
-
-	storage.add_candidate(candidate, pvd).unwrap();
+	// Remove candidate and re-add it later in backed state.
+	storage.remove_candidate(&candidate_hash);
+	assert!(!storage.contains(&candidate_hash));
+	assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 0);
+	assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), None);
+	assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head), None);
+	assert_eq!(storage.head_data_by_hash(&parent_head_hash), None);
+	assert_eq!(storage.is_backed(&candidate_hash), false);
+
+	storage
+		.add_candidate(candidate.clone(), pvd.clone(), CandidateState::Backed)
+		.unwrap();
+	assert_eq!(storage.is_backed(&candidate_hash), true);
+
+	// Test retain
 	storage.retain(|_| true);
 	assert!(storage.contains(&candidate_hash));
-	assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1);
-	assert!(storage.head_data_by_hash(&output_head_hash).is_some());
-
 	storage.retain(|_| false);
 	assert!(!storage.contains(&candidate_hash));
-	assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 0);
-	assert!(storage.head_data_by_hash(&output_head_hash).is_none());
+	assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 0);
+	assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), None);
+	assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head), None);
+	assert_eq!(storage.head_data_by_hash(&parent_head_hash), None);
+	assert_eq!(storage.is_backed(&candidate_hash), false);
 }
 
-// [`FragmentTree::populate`] should pick up candidates that build on other candidates.
 #[test]
-fn populate_works_recursively() {
-	let mut storage = CandidateStorage::new();
+fn populate_and_extend_from_storage_empty() {
+	// Empty chain and empty storage.
+	let storage = CandidateStorage::default();
+	let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
+	let pending_availability = Vec::new();
+
+	let scope = Scope::with_ancestors(
+		ParaId::from(2),
+		RelayChainBlockInfo {
+			number: 1,
+			hash: Hash::repeat_byte(1),
+			storage_root: Hash::repeat_byte(2),
+		},
+		base_constraints,
+		pending_availability,
+		4,
+		vec![],
+	)
+	.unwrap();
+	let mut chain = FragmentChain::populate(scope, &storage);
+	assert!(chain.to_vec().is_empty());
+
+	chain.extend_from_storage(&storage);
+	assert!(chain.to_vec().is_empty());
+}
+
+#[test]
+fn populate_and_extend_from_storage_with_existing_empty_to_vec() {
+	let mut storage = CandidateStorage::default();
 
 	let para_id = ParaId::from(5u32);
 	let relay_parent_a = Hash::repeat_byte(1);
 	let relay_parent_b = Hash::repeat_byte(2);
+	let relay_parent_c = Hash::repeat_byte(3);
 
 	let (pvd_a, candidate_a) = make_committed_candidate(
 		para_id,
@@ -285,56 +373,623 @@ fn populate_works_recursively() {
 	);
 	let candidate_b_hash = candidate_b.hash();
 
-	let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
-	let pending_availability = Vec::new();
+	let (pvd_c, candidate_c) = make_committed_candidate(
+		para_id,
+		relay_parent_c,
+		2,
+		vec![0x0c].into(),
+		vec![0x0d].into(),
+		2,
+	);
+	let candidate_c_hash = candidate_c.hash();
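+
+	// Candidates A, B and C form a simple chain: each one's output head data is the next one's
+	// required parent, and they are built on relay parents a, b and c respectively.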
 
-	let ancestors = vec![RelayChainBlockInfo {
+	let relay_parent_a_info = RelayChainBlockInfo {
 		number: pvd_a.relay_parent_number,
 		hash: relay_parent_a,
 		storage_root: pvd_a.relay_parent_storage_root,
-	}];
-
+	};
 	let relay_parent_b_info = RelayChainBlockInfo {
 		number: pvd_b.relay_parent_number,
 		hash: relay_parent_b,
 		storage_root: pvd_b.relay_parent_storage_root,
 	};
+	let relay_parent_c_info = RelayChainBlockInfo {
+		number: pvd_c.relay_parent_number,
+		hash: relay_parent_c,
+		storage_root: pvd_c.relay_parent_storage_root,
+	};
 
-	storage.add_candidate(candidate_a, pvd_a).unwrap();
-	storage.add_candidate(candidate_b, pvd_b).unwrap();
-	let scope = Scope::with_ancestors(
-		para_id,
-		relay_parent_b_info,
-		base_constraints,
-		pending_availability,
-		4,
-		ancestors,
-	)
-	.unwrap();
-	let tree = FragmentTree::populate(scope, &storage);
+	let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
+	let pending_availability = Vec::new();
+
+	let ancestors = vec![
+		// These need to be ordered in reverse (most recent first).
+		relay_parent_b_info.clone(),
+		relay_parent_a_info.clone(),
+	];
+
+	storage
+		.add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded)
+		.unwrap();
+	storage
+		.add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Backed)
+		.unwrap();
+	storage
+		.add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Backed)
+		.unwrap();
+
+	// Check two sets of wrong base constraints to which candidate A doesn't adhere.
+	{
+		for wrong_constraints in [
+			// Different required parent
+			make_constraints(0, vec![0], vec![0x0e].into()),
+			// Min relay parent number is wrong
+			make_constraints(1, vec![0], vec![0x0a].into()),
+		] {
+			let scope = Scope::with_ancestors(
+				para_id,
+				relay_parent_c_info.clone(),
+				wrong_constraints.clone(),
+				pending_availability.clone(),
+				4,
+				ancestors.clone(),
+			)
+			.unwrap();
+			let mut chain = FragmentChain::populate(scope, &storage);
+
+			assert!(chain.to_vec().is_empty());
+
+			chain.extend_from_storage(&storage);
+			assert!(chain.to_vec().is_empty());
+
+			// If the min relay parent number is wrong, candidate A can never become valid.
+			// Otherwise, if only the required parent doesn't match, candidate A is still a
+			// potential candidate.
+			if wrong_constraints.min_relay_parent_number == 1 {
+				assert_eq!(
+					chain.can_add_candidate_as_potential(
+						&storage,
+						&candidate_a.hash(),
+						&candidate_a.descriptor.relay_parent,
+						pvd_a.parent_head.hash(),
+						Some(candidate_a.commitments.head_data.hash()),
+					),
+					PotentialAddition::None
+				);
+			} else {
+				assert_eq!(
+					chain.can_add_candidate_as_potential(
+						&storage,
+						&candidate_a.hash(),
+						&candidate_a.descriptor.relay_parent,
+						pvd_a.parent_head.hash(),
+						Some(candidate_a.commitments.head_data.hash()),
+					),
+					PotentialAddition::Anyhow
+				);
+			}
+
+			// All other candidates can always be potential candidates.
+			for (candidate, pvd) in
+				[(candidate_b.clone(), pvd_b.clone()), (candidate_c.clone(), pvd_c.clone())]
+			{
+				assert_eq!(
+					chain.can_add_candidate_as_potential(
+						&storage,
+						&candidate.hash(),
+						&candidate.descriptor.relay_parent,
+						pvd.parent_head.hash(),
+						Some(candidate.commitments.head_data.hash()),
+					),
+					PotentialAddition::Anyhow
+				);
+			}
+		}
+	}
+
+	// Various max depths.
+	{
+		// depth is 0, which will only allow 1 candidate
+		let scope = Scope::with_ancestors(
+			para_id,
+			relay_parent_c_info.clone(),
+			base_constraints.clone(),
+			pending_availability.clone(),
+			0,
+			ancestors.clone(),
+		)
+		.unwrap();
+		// Before populating the chain, all candidates are potential candidates. However, they can
+		// only be added as connected candidates, because only one candidate is allowed by the max
+		// depth.
+		let chain = FragmentChain::populate(scope.clone(), &CandidateStorage::default());
+		for (candidate, pvd) in [
+			(candidate_a.clone(), pvd_a.clone()),
+			(candidate_b.clone(), pvd_b.clone()),
+			(candidate_c.clone(), pvd_c.clone()),
+		] {
+			assert_eq!(
+				chain.can_add_candidate_as_potential(
+					&CandidateStorage::default(),
+					&candidate.hash(),
+					&candidate.descriptor.relay_parent,
+					pvd.parent_head.hash(),
+					Some(candidate.commitments.head_data.hash()),
+				),
+				PotentialAddition::IfConnected
+			);
+		}
+		let mut chain = FragmentChain::populate(scope, &storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash]);
+		chain.extend_from_storage(&storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash]);
+		// Since depth is maxed out, we can't add more potential candidates.
+		// Candidate A is no longer a potential candidate because it's already present.
+		for (candidate, pvd) in [
+			(candidate_a.clone(), pvd_a.clone()),
+			(candidate_b.clone(), pvd_b.clone()),
+			(candidate_c.clone(), pvd_c.clone()),
+		] {
+			assert_eq!(
+				chain.can_add_candidate_as_potential(
+					&storage,
+					&candidate.hash(),
+					&candidate.descriptor.relay_parent,
+					pvd.parent_head.hash(),
+					Some(candidate.commitments.head_data.hash()),
+				),
+				PotentialAddition::None
+			);
+		}
+
+		// depth is 1, which allows two candidates
+		let scope = Scope::with_ancestors(
+			para_id,
+			relay_parent_c_info.clone(),
+			base_constraints.clone(),
+			pending_availability.clone(),
+			1,
+			ancestors.clone(),
+		)
+		.unwrap();
+		// Before populating the chain, all candidates can be added as potential.
+		let mut modified_storage = CandidateStorage::default();
+		let chain = FragmentChain::populate(scope.clone(), &modified_storage);
+		for (candidate, pvd) in [
+			(candidate_a.clone(), pvd_a.clone()),
+			(candidate_b.clone(), pvd_b.clone()),
+			(candidate_c.clone(), pvd_c.clone()),
+		] {
+			assert_eq!(
+				chain.can_add_candidate_as_potential(
+					&modified_storage,
+					&candidate.hash(),
+					&candidate.descriptor.relay_parent,
+					pvd.parent_head.hash(),
+					Some(candidate.commitments.head_data.hash()),
+				),
+				PotentialAddition::Anyhow
+			);
+		}
+		// Add an unconnected candidate. We should now only allow a connected candidate, because the
+		// max depth only allows one more candidate.
+		modified_storage
+			.add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded)
+			.unwrap();
+		let chain = FragmentChain::populate(scope.clone(), &modified_storage);
+		for (candidate, pvd) in
+			[(candidate_a.clone(), pvd_a.clone()), (candidate_c.clone(), pvd_c.clone())]
+		{
+			assert_eq!(
+				chain.can_add_candidate_as_potential(
+					&modified_storage,
+					&candidate.hash(),
+					&candidate.descriptor.relay_parent,
+					pvd.parent_head.hash(),
+					Some(candidate.commitments.head_data.hash()),
+				),
+				PotentialAddition::IfConnected
+			);
+		}
+
+		// Now try populating from all candidates.
+		let mut chain = FragmentChain::populate(scope, &storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]);
+		chain.extend_from_storage(&storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]);
+		// Since depth is maxed out, we can't add more potential candidates.
+		// Candidates A and B are no longer potential candidates because they're already present.
+		for (candidate, pvd) in [
+			(candidate_a.clone(), pvd_a.clone()),
+			(candidate_b.clone(), pvd_b.clone()),
+			(candidate_c.clone(), pvd_c.clone()),
+		] {
+			assert_eq!(
+				chain.can_add_candidate_as_potential(
+					&storage,
+					&candidate.hash(),
+					&candidate.descriptor.relay_parent,
+					pvd.parent_head.hash(),
+					Some(candidate.commitments.head_data.hash()),
+				),
+				PotentialAddition::None
+			);
+		}
+
+		// depths of 2 or more allow all candidates
+		for depth in 2..6 {
+			let scope = Scope::with_ancestors(
+				para_id,
+				relay_parent_c_info.clone(),
+				base_constraints.clone(),
+				pending_availability.clone(),
+				depth,
+				ancestors.clone(),
+			)
+			.unwrap();
+			let mut chain = FragmentChain::populate(scope, &storage);
+			assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]);
+			chain.extend_from_storage(&storage);
+			assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]);
+			// Candidates are no longer potential candidates because they're already part of the
+			// chain.
+			for (candidate, pvd) in [
+				(candidate_a.clone(), pvd_a.clone()),
+				(candidate_b.clone(), pvd_b.clone()),
+				(candidate_c.clone(), pvd_c.clone()),
+			] {
+				assert_eq!(
+					chain.can_add_candidate_as_potential(
+						&storage,
+						&candidate.hash(),
+						&candidate.descriptor.relay_parent,
+						pvd.parent_head.hash(),
+						Some(candidate.commitments.head_data.hash()),
+					),
+					PotentialAddition::None
+				);
+			}
+		}
+	}
+
+	// Wrong relay parents
+	{
+		// Candidate A has a relay parent which is out of scope.
+		let ancestors_without_a = vec![relay_parent_b_info.clone()];
+		let scope = Scope::with_ancestors(
+			para_id,
+			relay_parent_c_info.clone(),
+			base_constraints.clone(),
+			pending_availability.clone(),
+			4,
+			ancestors_without_a,
+		)
+		.unwrap();
+
+		let mut chain = FragmentChain::populate(scope, &storage);
+		assert!(chain.to_vec().is_empty());
+
+		chain.extend_from_storage(&storage);
+		assert!(chain.to_vec().is_empty());
+
+		// Candidate A is not a potential candidate, but candidates B and C still are.
+		assert_eq!(
+			chain.can_add_candidate_as_potential(
+				&storage,
+				&candidate_a.hash(),
+				&candidate_a.descriptor.relay_parent,
+				pvd_a.parent_head.hash(),
+				Some(candidate_a.commitments.head_data.hash()),
+			),
+			PotentialAddition::None
+		);
+		for (candidate, pvd) in
+			[(candidate_b.clone(), pvd_b.clone()), (candidate_c.clone(), pvd_c.clone())]
+		{
+			assert_eq!(
+				chain.can_add_candidate_as_potential(
+					&storage,
+					&candidate.hash(),
+					&candidate.descriptor.relay_parent,
+					pvd.parent_head.hash(),
+					Some(candidate.commitments.head_data.hash()),
+				),
+				PotentialAddition::Anyhow
+			);
+		}
+
+		// Candidate C has the same relay parent as candidate A. The relay parent is not allowed
+		// to move backwards.
+		let mut modified_storage = storage.clone();
+		modified_storage.remove_candidate(&candidate_c_hash);
+		let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate(
+			para_id,
+			relay_parent_a,
+			1,
+			vec![0x0c].into(),
+			vec![0x0d].into(),
+			2,
+		);
+		modified_storage
+			.add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded)
+			.unwrap();
+		let scope = Scope::with_ancestors(
+			para_id,
+			relay_parent_c_info.clone(),
+			base_constraints.clone(),
+			pending_availability.clone(),
+			4,
+			ancestors.clone(),
+		)
+		.unwrap();
+		let mut chain = FragmentChain::populate(scope, &modified_storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]);
+		chain.extend_from_storage(&modified_storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]);
+
+		// Candidate C is not even a potential candidate.
+		assert_eq!(
+			chain.can_add_candidate_as_potential(
+				&modified_storage,
+				&wrong_candidate_c.hash(),
+				&wrong_candidate_c.descriptor.relay_parent,
+				wrong_pvd_c.parent_head.hash(),
+				Some(wrong_candidate_c.commitments.head_data.hash()),
+			),
+			PotentialAddition::None
+		);
+	}
+
+	// Parachain forks and cycles are not allowed.
+	{
+		// Candidate C has the same parent as candidate B.
+		let mut modified_storage = storage.clone();
+		modified_storage.remove_candidate(&candidate_c_hash);
+		let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate(
+			para_id,
+			relay_parent_c,
+			2,
+			vec![0x0b].into(),
+			vec![0x0d].into(),
+			2,
+		);
+		modified_storage
+			.add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded)
+			.unwrap();
+		let scope = Scope::with_ancestors(
+			para_id,
+			relay_parent_c_info.clone(),
+			base_constraints.clone(),
+			pending_availability.clone(),
+			4,
+			ancestors.clone(),
+		)
+		.unwrap();
+		let mut chain = FragmentChain::populate(scope, &modified_storage);
+		// We'll either have A->B or A->C. It's not deterministic because CandidateStorage uses
+		// HashSets and HashMaps.
+		if chain.to_vec() == vec![candidate_a_hash, candidate_b_hash] {
+			chain.extend_from_storage(&modified_storage);
+			assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]);
+			// Candidate C is not even a potential candidate.
+			assert_eq!(
+				chain.can_add_candidate_as_potential(
+					&modified_storage,
+					&wrong_candidate_c.hash(),
+					&wrong_candidate_c.descriptor.relay_parent,
+					wrong_pvd_c.parent_head.hash(),
+					Some(wrong_candidate_c.commitments.head_data.hash()),
+				),
+				PotentialAddition::None
+			);
+		} else if chain.to_vec() == vec![candidate_a_hash, wrong_candidate_c.hash()] {
+			chain.extend_from_storage(&modified_storage);
+			assert_eq!(chain.to_vec(), vec![candidate_a_hash, wrong_candidate_c.hash()]);
+			// Candidate B is not even a potential candidate.
+			assert_eq!(
+				chain.can_add_candidate_as_potential(
+					&modified_storage,
+					&candidate_b.hash(),
+					&candidate_b.descriptor.relay_parent,
+					pvd_b.parent_head.hash(),
+					Some(candidate_b.commitments.head_data.hash()),
+				),
+				PotentialAddition::None
+			);
+		} else {
+			panic!("Unexpected chain: {:?}", chain.to_vec());
+		}
+
+		// Candidate C is a 0-length cycle: its output head data is the same as its required
+		// parent (candidate B's output).
+		let mut modified_storage = storage.clone();
+		modified_storage.remove_candidate(&candidate_c_hash);
+		let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate(
+			para_id,
+			relay_parent_c,
+			2,
+			vec![0x0c].into(),
+			vec![0x0c].into(),
+			2,
+		);
+		modified_storage
+			.add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded)
+			.unwrap();
+		let scope = Scope::with_ancestors(
+			para_id,
+			relay_parent_c_info.clone(),
+			base_constraints.clone(),
+			pending_availability.clone(),
+			4,
+			ancestors.clone(),
+		)
+		.unwrap();
+		let mut chain = FragmentChain::populate(scope, &modified_storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]);
+		chain.extend_from_storage(&modified_storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]);
+		// Candidate C is not even a potential candidate.
+		assert_eq!(
+			chain.can_add_candidate_as_potential(
+				&modified_storage,
+				&wrong_candidate_c.hash(),
+				&wrong_candidate_c.descriptor.relay_parent,
+				wrong_pvd_c.parent_head.hash(),
+				Some(wrong_candidate_c.commitments.head_data.hash()),
+			),
+			PotentialAddition::None
+		);
+
+		// Candidate C points back to the pre-state of candidate B (the state produced by
+		// candidate A), forming a cycle.
+		let mut modified_storage = storage.clone();
+		modified_storage.remove_candidate(&candidate_c_hash);
+		let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate(
+			para_id,
+			relay_parent_c,
+			2,
+			vec![0x0c].into(),
+			vec![0x0b].into(),
+			2,
+		);
+		modified_storage
+			.add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded)
+			.unwrap();
+		let scope = Scope::with_ancestors(
+			para_id,
+			relay_parent_c_info.clone(),
+			base_constraints.clone(),
+			pending_availability.clone(),
+			4,
+			ancestors.clone(),
+		)
+		.unwrap();
+		let mut chain = FragmentChain::populate(scope, &modified_storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]);
+		chain.extend_from_storage(&modified_storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]);
+		// Candidate C is not even a potential candidate.
+		assert_eq!(
+			chain.can_add_candidate_as_potential(
+				&modified_storage,
+				&wrong_candidate_c.hash(),
+				&wrong_candidate_c.descriptor.relay_parent,
+				wrong_pvd_c.parent_head.hash(),
+				Some(wrong_candidate_c.commitments.head_data.hash()),
+			),
+			PotentialAddition::None
+		);
+	}
 
-	let candidates: Vec<_> = tree.candidates().collect();
-	assert_eq!(candidates.len(), 2);
-	assert!(candidates.contains(&candidate_a_hash));
-	assert!(candidates.contains(&candidate_b_hash));
+	// Test with candidates pending availability
+	{
+		// Valid options
+		for pending in [
+			vec![PendingAvailability {
+				candidate_hash: candidate_a_hash,
+				relay_parent: relay_parent_a_info.clone(),
+			}],
+			vec![
+				PendingAvailability {
+					candidate_hash: candidate_a_hash,
+					relay_parent: relay_parent_a_info.clone(),
+				},
+				PendingAvailability {
+					candidate_hash: candidate_b_hash,
+					relay_parent: relay_parent_b_info.clone(),
+				},
+			],
+			vec![
+				PendingAvailability {
+					candidate_hash: candidate_a_hash,
+					relay_parent: relay_parent_a_info.clone(),
+				},
+				PendingAvailability {
+					candidate_hash: candidate_b_hash,
+					relay_parent: relay_parent_b_info.clone(),
+				},
+				PendingAvailability {
+					candidate_hash: candidate_c_hash,
+					relay_parent: relay_parent_c_info.clone(),
+				},
+			],
+		] {
+			let scope = Scope::with_ancestors(
+				para_id,
+				relay_parent_c_info.clone(),
+				base_constraints.clone(),
+				pending,
+				3,
+				ancestors.clone(),
+			)
+			.unwrap();
+			let mut chain = FragmentChain::populate(scope, &storage);
+			assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]);
+			chain.extend_from_storage(&storage);
+			assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]);
+		}
 
-	assert_eq!(tree.nodes.len(), 2);
-	assert_eq!(tree.nodes[0].parent, NodePointer::Root);
-	assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash);
-	assert_eq!(tree.nodes[0].depth, 0);
+		// Relay parents of pending availability candidates can be out of scope
+		// Relay parent of candidate A is out of scope.
+		let ancestors_without_a = vec![relay_parent_b_info.clone()];
+		let scope = Scope::with_ancestors(
+			para_id,
+			relay_parent_c_info.clone(),
+			base_constraints.clone(),
+			vec![PendingAvailability {
+				candidate_hash: candidate_a_hash,
+				relay_parent: relay_parent_a_info.clone(),
+			}],
+			4,
+			ancestors_without_a,
+		)
+		.unwrap();
+		let mut chain = FragmentChain::populate(scope, &storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]);
+		chain.extend_from_storage(&storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]);
+
+		// Even relay parents of pending availability candidates which are out of scope cannot move
+		// backwards.
+		let scope = Scope::with_ancestors(
+			para_id,
+			relay_parent_c_info.clone(),
+			base_constraints.clone(),
+			vec![
+				PendingAvailability {
+					candidate_hash: candidate_a_hash,
+					relay_parent: RelayChainBlockInfo {
+						hash: relay_parent_a_info.hash,
+						number: 1,
+						storage_root: relay_parent_a_info.storage_root,
+					},
+				},
+				PendingAvailability {
+					candidate_hash: candidate_b_hash,
+					relay_parent: RelayChainBlockInfo {
+						hash: relay_parent_b_info.hash,
+						number: 0,
+						storage_root: relay_parent_b_info.storage_root,
+					},
+				},
+			],
+			4,
+			vec![],
+		)
+		.unwrap();
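+		// Candidate B's pending-availability record claims relay parent number 0, lower than
+		// candidate A's (1), so no chain can be built.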
+		let mut chain = FragmentChain::populate(scope, &storage);
+		assert!(chain.to_vec().is_empty());
 
-	assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0));
-	assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash);
-	assert_eq!(tree.nodes[1].depth, 1);
+		chain.extend_from_storage(&storage);
+		assert!(chain.to_vec().is_empty());
+	}
 }
 
 #[test]
-fn children_of_root_are_contiguous() {
-	let mut storage = CandidateStorage::new();
-
+fn extend_from_storage_with_existing_to_vec() {
 	let para_id = ParaId::from(5u32);
 	let relay_parent_a = Hash::repeat_byte(1);
 	let relay_parent_b = Hash::repeat_byte(2);
+	let relay_parent_d = Hash::repeat_byte(3);
 
 	let (pvd_a, candidate_a) = make_committed_candidate(
 		para_id,
@@ -344,6 +999,7 @@ fn children_of_root_are_contiguous() {
 		vec![0x0b].into(),
 		0,
 	);
+	let candidate_a_hash = candidate_a.hash();
 
 	let (pvd_b, candidate_b) = make_committed_candidate(
 		para_id,
@@ -353,182 +1009,136 @@ fn children_of_root_are_contiguous() {
 		vec![0x0c].into(),
 		1,
 	);
+	let candidate_b_hash = candidate_b.hash();
 
-	let (pvd_a2, candidate_a2) = make_committed_candidate(
+	let (pvd_c, candidate_c) = make_committed_candidate(
 		para_id,
-		relay_parent_a,
-		0,
-		vec![0x0a].into(),
-		vec![0x0b, 1].into(),
-		0,
+		// Use the same relay parent number as B to test that it doesn't need to change between
+		// candidates.
+		relay_parent_b,
+		1,
+		vec![0x0c].into(),
+		vec![0x0d].into(),
+		1,
 	);
-	let candidate_a2_hash = candidate_a2.hash();
+	let candidate_c_hash = candidate_c.hash();
 
-	let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
-	let pending_availability = Vec::new();
+	// Candidate D will never be added to the chain, since its required parent (0x0e) is never
+	// produced by any other candidate.
+	let (pvd_d, candidate_d) = make_committed_candidate(
+		para_id,
+		relay_parent_d,
+		2,
+		vec![0x0e].into(),
+		vec![0x0f].into(),
+		1,
+	);
 
-	let ancestors = vec![RelayChainBlockInfo {
+	let relay_parent_a_info = RelayChainBlockInfo {
 		number: pvd_a.relay_parent_number,
 		hash: relay_parent_a,
 		storage_root: pvd_a.relay_parent_storage_root,
-	}];
-
+	};
 	let relay_parent_b_info = RelayChainBlockInfo {
 		number: pvd_b.relay_parent_number,
 		hash: relay_parent_b,
 		storage_root: pvd_b.relay_parent_storage_root,
 	};
+	let relay_parent_d_info = RelayChainBlockInfo {
+		number: pvd_d.relay_parent_number,
+		hash: relay_parent_d,
+		storage_root: pvd_d.relay_parent_storage_root,
+	};
 
-	storage.add_candidate(candidate_a, pvd_a).unwrap();
-	storage.add_candidate(candidate_b, pvd_b).unwrap();
-	let scope = Scope::with_ancestors(
-		para_id,
-		relay_parent_b_info,
-		base_constraints,
-		pending_availability,
-		4,
-		ancestors,
-	)
-	.unwrap();
-	let mut tree = FragmentTree::populate(scope, &storage);
+	let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
+	let pending_availability = Vec::new();
+
+	let ancestors = vec![
+		// These need to be ordered in reverse (most recent first).
+		relay_parent_b_info.clone(),
+		relay_parent_a_info.clone(),
+	];
 
-	storage.add_candidate(candidate_a2, pvd_a2).unwrap();
-	tree.add_and_populate(candidate_a2_hash, &storage);
-	let candidates: Vec<_> = tree.candidates().collect();
-	assert_eq!(candidates.len(), 3);
+	// Already had A and C in the storage. Introduce B, which should add both B and C to the chain
+	// now.
+	{
+		let mut storage = CandidateStorage::default();
+		storage
+			.add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded)
+			.unwrap();
+		storage
+			.add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Seconded)
+			.unwrap();
+		storage
+			.add_candidate(candidate_d.clone(), pvd_d.clone(), CandidateState::Seconded)
+			.unwrap();
+
+		let scope = Scope::with_ancestors(
+			para_id,
+			relay_parent_d_info.clone(),
+			base_constraints.clone(),
+			pending_availability.clone(),
+			4,
+			ancestors.clone(),
+		)
+		.unwrap();
+		let mut chain = FragmentChain::populate(scope, &storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash]);
+
+		storage
+			.add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded)
+			.unwrap();
+		chain.extend_from_storage(&storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]);
+	}
 
-	assert_eq!(tree.nodes[0].parent, NodePointer::Root);
-	assert_eq!(tree.nodes[1].parent, NodePointer::Root);
-	assert_eq!(tree.nodes[2].parent, NodePointer::Storage(0));
+	// Already had A and B in the chain. Introduce C.
+	{
+		let mut storage = CandidateStorage::default();
+		storage
+			.add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded)
+			.unwrap();
+		storage
+			.add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded)
+			.unwrap();
+		storage
+			.add_candidate(candidate_d.clone(), pvd_d.clone(), CandidateState::Seconded)
+			.unwrap();
+
+		let scope = Scope::with_ancestors(
+			para_id,
+			relay_parent_d_info.clone(),
+			base_constraints.clone(),
+			pending_availability.clone(),
+			4,
+			ancestors.clone(),
+		)
+		.unwrap();
+		let mut chain = FragmentChain::populate(scope, &storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]);
+
+		storage
+			.add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Seconded)
+			.unwrap();
+		chain.extend_from_storage(&storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]);
+	}
 }
 
 #[test]
-fn add_candidate_child_of_root() {
-	let mut storage = CandidateStorage::new();
-
+fn test_find_ancestor_path_and_find_backable_chain_empty_to_vec() {
 	let para_id = ParaId::from(5u32);
-	let relay_parent_a = Hash::repeat_byte(1);
+	let relay_parent = Hash::repeat_byte(1);
+	let required_parent: HeadData = vec![0xff].into();
+	let max_depth = 10;
 
-	let (pvd_a, candidate_a) = make_committed_candidate(
-		para_id,
-		relay_parent_a,
-		0,
-		vec![0x0a].into(),
-		vec![0x0b].into(),
-		0,
-	);
-
-	let (pvd_b, candidate_b) = make_committed_candidate(
-		para_id,
-		relay_parent_a,
-		0,
-		vec![0x0a].into(),
-		vec![0x0c].into(),
-		0,
-	);
-	let candidate_b_hash = candidate_b.hash();
-
-	let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
-	let pending_availability = Vec::new();
-
-	let relay_parent_a_info = RelayChainBlockInfo {
-		number: pvd_a.relay_parent_number,
-		hash: relay_parent_a,
-		storage_root: pvd_a.relay_parent_storage_root,
-	};
-
-	storage.add_candidate(candidate_a, pvd_a).unwrap();
-	let scope = Scope::with_ancestors(
-		para_id,
-		relay_parent_a_info,
-		base_constraints,
-		pending_availability,
-		4,
-		vec![],
-	)
-	.unwrap();
-	let mut tree = FragmentTree::populate(scope, &storage);
-
-	storage.add_candidate(candidate_b, pvd_b).unwrap();
-	tree.add_and_populate(candidate_b_hash, &storage);
-	let candidates: Vec<_> = tree.candidates().collect();
-	assert_eq!(candidates.len(), 2);
-
-	assert_eq!(tree.nodes[0].parent, NodePointer::Root);
-	assert_eq!(tree.nodes[1].parent, NodePointer::Root);
-}
-
-#[test]
-fn add_candidate_child_of_non_root() {
-	let mut storage = CandidateStorage::new();
-
-	let para_id = ParaId::from(5u32);
-	let relay_parent_a = Hash::repeat_byte(1);
-
-	let (pvd_a, candidate_a) = make_committed_candidate(
-		para_id,
-		relay_parent_a,
-		0,
-		vec![0x0a].into(),
-		vec![0x0b].into(),
-		0,
-	);
-
-	let (pvd_b, candidate_b) = make_committed_candidate(
-		para_id,
-		relay_parent_a,
-		0,
-		vec![0x0b].into(),
-		vec![0x0c].into(),
-		0,
-	);
-	let candidate_b_hash = candidate_b.hash();
-
-	let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
-	let pending_availability = Vec::new();
-
-	let relay_parent_a_info = RelayChainBlockInfo {
-		number: pvd_a.relay_parent_number,
-		hash: relay_parent_a,
-		storage_root: pvd_a.relay_parent_storage_root,
-	};
-
-	storage.add_candidate(candidate_a, pvd_a).unwrap();
-	let scope = Scope::with_ancestors(
-		para_id,
-		relay_parent_a_info,
-		base_constraints,
-		pending_availability,
-		4,
-		vec![],
-	)
-	.unwrap();
-	let mut tree = FragmentTree::populate(scope, &storage);
-
-	storage.add_candidate(candidate_b, pvd_b).unwrap();
-	tree.add_and_populate(candidate_b_hash, &storage);
-	let candidates: Vec<_> = tree.candidates().collect();
-	assert_eq!(candidates.len(), 2);
-
-	assert_eq!(tree.nodes[0].parent, NodePointer::Root);
-	assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0));
-}
-
-#[test]
-fn test_find_ancestor_path_and_find_backable_chain_empty_tree() {
-	let para_id = ParaId::from(5u32);
-	let relay_parent = Hash::repeat_byte(1);
-	let required_parent: HeadData = vec![0xff].into();
-	let max_depth = 10;
-
-	// Empty tree
-	let storage = CandidateStorage::new();
-	let base_constraints = make_constraints(0, vec![0], required_parent.clone());
-
-	let relay_parent_info =
-		RelayChainBlockInfo { number: 0, hash: relay_parent, storage_root: Hash::zero() };
-
-	let scope = Scope::with_ancestors(
+	// Empty chain
+	let storage = CandidateStorage::default();
+	let base_constraints = make_constraints(0, vec![0], required_parent.clone());
+
+	let relay_parent_info =
+		RelayChainBlockInfo { number: 0, hash: relay_parent, storage_root: Hash::zero() };
+
+	let scope = Scope::with_ancestors(
 		para_id,
 		relay_parent_info,
 		base_constraints,
@@ -537,64 +1147,23 @@ fn test_find_ancestor_path_and_find_backable_chain_empty_tree() {
 		vec![],
 	)
 	.unwrap();
-	let tree = FragmentTree::populate(scope, &storage);
-	assert_eq!(tree.candidates().collect::<Vec<_>>().len(), 0);
-	assert_eq!(tree.nodes.len(), 0);
+	let chain = FragmentChain::populate(scope, &storage);
+	assert!(chain.to_vec().is_empty());
 
-	assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root);
-	assert_eq!(tree.find_backable_chain(Ancestors::new(), 2, |_| true), vec![]);
+	assert_eq!(chain.find_ancestor_path(Ancestors::new()), 0);
+	assert_eq!(chain.find_backable_chain(Ancestors::new(), 2, |_| true), vec![]);
 	// Invalid candidate.
 	let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect();
-	assert_eq!(tree.find_ancestor_path(ancestors.clone()), Some(NodePointer::Root));
-	assert_eq!(tree.find_backable_chain(ancestors, 2, |_| true), vec![]);
+	assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0);
+	assert_eq!(chain.find_backable_chain(ancestors, 2, |_| true), vec![]);
 }
 
-#[rstest]
-#[case(true, 13)]
-#[case(false, 8)]
-// The tree with no cycles looks like:
-// Make a tree that looks like this (note that there's no cycle):
-//         +-(root)-+
-//         |        |
-//    +----0---+    7
-//    |        |
-//    1----+   5
-//    |    |
-//    |    |
-//    2    6
-//    |
-//    3
-//    |
-//    4
-//
-// The tree with cycles is the same as the first but has a cycle from 4 back to the state
-// produced by 0 (It's bounded by the max_depth + 1).
-//         +-(root)-+
-//         |        |
-//    +----0---+    7
-//    |        |
-//    1----+   5
-//    |    |
-//    |    |
-//    2    6
-//    |
-//    3
-//    |
-//    4---+
-//    |   |
-//    1   5
-//    |
-//    2
-//    |
-//    3
-fn test_find_ancestor_path_and_find_backable_chain(
-	#[case] has_cycle: bool,
-	#[case] expected_node_count: usize,
-) {
+#[test]
+fn test_find_ancestor_path_and_find_backable_to_vec() {
 	let para_id = ParaId::from(5u32);
 	let relay_parent = Hash::repeat_byte(1);
 	let required_parent: HeadData = vec![0xff].into();
-	let max_depth = 7;
+	let max_depth = 5;
 	let relay_parent_number = 0;
 	let relay_parent_storage_root = Hash::repeat_byte(69);
 
@@ -650,42 +1219,13 @@ fn test_find_ancestor_path_and_find_backable_chain(
 		para_id,
 		relay_parent,
 		0,
-		vec![0].into(),
+		vec![4].into(),
 		vec![5].into(),
 		0,
 	));
-	// Candidate 6
-	candidates.push(make_committed_candidate(
-		para_id,
-		relay_parent,
-		0,
-		vec![1].into(),
-		vec![6].into(),
-		0,
-	));
-	// Candidate 7
-	candidates.push(make_committed_candidate(
-		para_id,
-		relay_parent,
-		0,
-		required_parent.clone(),
-		vec![7].into(),
-		0,
-	));
-
-	if has_cycle {
-		candidates[4] = make_committed_candidate(
-			para_id,
-			relay_parent,
-			0,
-			vec![3].into(),
-			vec![0].into(), // put the cycle here back to the output state of 0.
-			0,
-		);
-	}
 
 	let base_constraints = make_constraints(0, vec![0], required_parent.clone());
-	let mut storage = CandidateStorage::new();
+	let mut storage = CandidateStorage::default();
 
 	let relay_parent_info = RelayChainBlockInfo {
 		number: relay_parent_number,
@@ -694,265 +1234,175 @@ fn test_find_ancestor_path_and_find_backable_chain(
 	};
 
 	for (pvd, candidate) in candidates.iter() {
-		storage.add_candidate(candidate.clone(), pvd.clone()).unwrap();
+		storage
+			.add_candidate(candidate.clone(), pvd.clone(), CandidateState::Seconded)
+			.unwrap();
 	}
 	let candidates = candidates.into_iter().map(|(_pvd, candidate)| candidate).collect::<Vec<_>>();
 	let scope = Scope::with_ancestors(
 		para_id,
-		relay_parent_info,
-		base_constraints,
+		relay_parent_info.clone(),
+		base_constraints.clone(),
 		vec![],
 		max_depth,
 		vec![],
 	)
 	.unwrap();
-	let tree = FragmentTree::populate(scope, &storage);
-
-	assert_eq!(tree.candidates().collect::<Vec<_>>().len(), candidates.len());
-	assert_eq!(tree.nodes.len(), expected_node_count);
-
-	// Do some common tests on both trees.
-	{
-		// No ancestors supplied.
-		assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root);
-		assert_eq!(
-			tree.find_backable_chain(Ancestors::new(), 4, |_| true),
-			[0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
-		);
-		// Ancestor which is not part of the tree. Will be ignored.
-		let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect();
-		assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root);
-		assert_eq!(
-			tree.find_backable_chain(ancestors, 4, |_| true),
-			[0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
-		);
-		// A chain fork.
-		let ancestors: Ancestors =
-			[(candidates[0].hash()), (candidates[7].hash())].into_iter().collect();
-		assert_eq!(tree.find_ancestor_path(ancestors.clone()), None);
-		assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]);
+	let chain = FragmentChain::populate(scope, &storage);
 
-		// Ancestors which are part of the tree but don't form a path. Will be ignored.
-		let ancestors: Ancestors =
-			[candidates[1].hash(), candidates[2].hash()].into_iter().collect();
-		assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root);
-		assert_eq!(
-			tree.find_backable_chain(ancestors, 4, |_| true),
-			[0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
-		);
-
-		// Valid ancestors.
-		let ancestors: Ancestors = [candidates[7].hash()].into_iter().collect();
-		let res = tree.find_ancestor_path(ancestors.clone()).unwrap();
-		let candidate = &tree.nodes[res.unwrap_idx()];
-		assert_eq!(candidate.candidate_hash, candidates[7].hash());
-		assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]);
-
-		let ancestors: Ancestors =
-			[candidates[2].hash(), candidates[0].hash(), candidates[1].hash()]
-				.into_iter()
-				.collect();
-		let res = tree.find_ancestor_path(ancestors.clone()).unwrap();
-		let candidate = &tree.nodes[res.unwrap_idx()];
-		assert_eq!(candidate.candidate_hash, candidates[2].hash());
-		assert_eq!(
-			tree.find_backable_chain(ancestors.clone(), 2, |_| true),
-			[3, 4].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
-		);
+	assert_eq!(candidates.len(), 6);
+	assert_eq!(chain.to_vec().len(), 6);
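+	// The populated chain is candidates 0 -> 1 -> 2 -> 3 -> 4 -> 5, in index order.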
 
-		// Valid ancestors with candidates which have been omitted due to timeouts
-		let ancestors: Ancestors =
-			[candidates[0].hash(), candidates[2].hash()].into_iter().collect();
-		let res = tree.find_ancestor_path(ancestors.clone()).unwrap();
-		let candidate = &tree.nodes[res.unwrap_idx()];
-		assert_eq!(candidate.candidate_hash, candidates[0].hash());
-		assert_eq!(
-			tree.find_backable_chain(ancestors, 3, |_| true),
-			[1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
-		);
-
-		let ancestors: Ancestors =
-			[candidates[0].hash(), candidates[1].hash(), candidates[3].hash()]
-				.into_iter()
-				.collect();
-		let res = tree.find_ancestor_path(ancestors.clone()).unwrap();
-		let candidate = &tree.nodes[res.unwrap_idx()];
-		assert_eq!(candidate.candidate_hash, candidates[1].hash());
-		if has_cycle {
-			assert_eq!(
-				tree.find_backable_chain(ancestors, 2, |_| true),
-				[2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
-			);
-		} else {
-			assert_eq!(
-				tree.find_backable_chain(ancestors, 4, |_| true),
-				[2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
-			);
-		}
+	// No ancestors supplied.
+	assert_eq!(chain.find_ancestor_path(Ancestors::new()), 0);
+	assert_eq!(chain.find_backable_chain(Ancestors::new(), 0, |_| true), vec![]);
+	assert_eq!(
+		chain.find_backable_chain(Ancestors::new(), 1, |_| true),
+		[0].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+	);
+	assert_eq!(
+		chain.find_backable_chain(Ancestors::new(), 2, |_| true),
+		[0, 1].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+	);
+	assert_eq!(
+		chain.find_backable_chain(Ancestors::new(), 5, |_| true),
+		[0, 1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+	);
 
-		let ancestors: Ancestors =
-			[candidates[1].hash(), candidates[2].hash()].into_iter().collect();
-		let res = tree.find_ancestor_path(ancestors.clone()).unwrap();
-		assert_eq!(res, NodePointer::Root);
+	for count in 6..10 {
 		assert_eq!(
-			tree.find_backable_chain(ancestors, 4, |_| true),
-			[0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+			chain.find_backable_chain(Ancestors::new(), count, |_| true),
+			[0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
 		);
+	}
 
-		// Requested count is 0.
-		assert_eq!(tree.find_backable_chain(Ancestors::new(), 0, |_| true), vec![]);
+	assert_eq!(
+		chain.find_backable_chain(Ancestors::new(), 7, |_| true),
+		[0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+	);
+	assert_eq!(
+		chain.find_backable_chain(Ancestors::new(), 10, |_| true),
+		[0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+	);
 
-		let ancestors: Ancestors =
-			[candidates[2].hash(), candidates[0].hash(), candidates[1].hash()]
-				.into_iter()
-				.collect();
-		assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]);
+	// Ancestor which is not part of the chain. Will be ignored.
+	let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect();
+	assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0);
+	assert_eq!(
+		chain.find_backable_chain(ancestors, 4, |_| true),
+		[0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+	);
+	let ancestors: Ancestors =
+		[candidates[1].hash(), CandidateHash::default()].into_iter().collect();
+	assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0);
+	assert_eq!(
+		chain.find_backable_chain(ancestors, 4, |_| true),
+		[0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+	);
+	let ancestors: Ancestors =
+		[candidates[0].hash(), CandidateHash::default()].into_iter().collect();
+	assert_eq!(chain.find_ancestor_path(ancestors.clone()), 1);
+	assert_eq!(
+		chain.find_backable_chain(ancestors, 4, |_| true),
+		[1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+	);
 
-		let ancestors: Ancestors =
-			[candidates[2].hash(), candidates[0].hash()].into_iter().collect();
-		assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]);
-	}
+	// Ancestors which are part of the chain but don't form a path from root. Will be ignored.
+	let ancestors: Ancestors = [candidates[1].hash(), candidates[2].hash()].into_iter().collect();
+	assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0);
+	assert_eq!(
+		chain.find_backable_chain(ancestors, 4, |_| true),
+		[0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+	);
 
-	// Now do some tests only on the tree with cycles
-	if has_cycle {
-		// Exceeds the maximum tree depth. 0-1-2-3-4-1-2-3-4, when the tree stops at
-		// 0-1-2-3-4-1-2-3.
-		let ancestors: Ancestors = [
-			candidates[0].hash(),
-			candidates[1].hash(),
-			candidates[2].hash(),
-			candidates[3].hash(),
-			candidates[4].hash(),
-		]
+	// Valid ancestors.
+	let ancestors: Ancestors = [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()]
 		.into_iter()
 		.collect();
-		let res = tree.find_ancestor_path(ancestors.clone()).unwrap();
-		let candidate = &tree.nodes[res.unwrap_idx()];
-		assert_eq!(candidate.candidate_hash, candidates[4].hash());
-		assert_eq!(
-			tree.find_backable_chain(ancestors, 4, |_| true),
-			[1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
-		);
-
-		// 0-1-2.
-		let ancestors: Ancestors =
-			[candidates[0].hash(), candidates[1].hash(), candidates[2].hash()]
-				.into_iter()
-				.collect();
-		let res = tree.find_ancestor_path(ancestors.clone()).unwrap();
-		let candidate = &tree.nodes[res.unwrap_idx()];
-		assert_eq!(candidate.candidate_hash, candidates[2].hash());
-		assert_eq!(
-			tree.find_backable_chain(ancestors.clone(), 1, |_| true),
-			[3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
-		);
+	assert_eq!(chain.find_ancestor_path(ancestors.clone()), 3);
+	assert_eq!(
+		chain.find_backable_chain(ancestors.clone(), 2, |_| true),
+		[3, 4].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+	);
+	for count in 3..10 {
 		assert_eq!(
-			tree.find_backable_chain(ancestors, 5, |_| true),
-			[3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+			chain.find_backable_chain(ancestors.clone(), count, |_| true),
+			[3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
 		);
+	}
 
-		// 0-1
-		let ancestors: Ancestors =
-			[candidates[0].hash(), candidates[1].hash()].into_iter().collect();
-		let res = tree.find_ancestor_path(ancestors.clone()).unwrap();
-		let candidate = &tree.nodes[res.unwrap_idx()];
-		assert_eq!(candidate.candidate_hash, candidates[1].hash());
+	// Valid ancestors with candidates which have been omitted due to timeouts
+	let ancestors: Ancestors = [candidates[0].hash(), candidates[2].hash()].into_iter().collect();
+	assert_eq!(chain.find_ancestor_path(ancestors.clone()), 1);
+	assert_eq!(
+		chain.find_backable_chain(ancestors.clone(), 3, |_| true),
+		[1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+	);
+	assert_eq!(
+		chain.find_backable_chain(ancestors.clone(), 4, |_| true),
+		[1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+	);
+	for count in 5..10 {
 		assert_eq!(
-			tree.find_backable_chain(ancestors, 6, |_| true),
-			[2, 3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>(),
+			chain.find_backable_chain(ancestors.clone(), count, |_| true),
+			[1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
 		);
+	}
 
-		// For 0-1-2-3-4-5, there's more than 1 way of finding this path in
-		// the tree. `None` should be returned. The runtime should not have accepted this.
-		let ancestors: Ancestors = [
-			candidates[0].hash(),
-			candidates[1].hash(),
-			candidates[2].hash(),
-			candidates[3].hash(),
-			candidates[4].hash(),
-			candidates[5].hash(),
-		]
+	let ancestors: Ancestors = [candidates[0].hash(), candidates[1].hash(), candidates[3].hash()]
 		.into_iter()
 		.collect();
-		let res = tree.find_ancestor_path(ancestors.clone());
-		assert_eq!(res, None);
-		assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]);
-	}
-}
-
-#[test]
-fn graceful_cycle_of_0() {
-	let mut storage = CandidateStorage::new();
-
-	let para_id = ParaId::from(5u32);
-	let relay_parent_a = Hash::repeat_byte(1);
-
-	let (pvd_a, candidate_a) = make_committed_candidate(
-		para_id,
-		relay_parent_a,
-		0,
-		vec![0x0a].into(),
-		vec![0x0a].into(), // input same as output
-		0,
+	assert_eq!(chain.find_ancestor_path(ancestors.clone()), 2);
+	assert_eq!(
+		chain.find_backable_chain(ancestors.clone(), 4, |_| true),
+		[2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
 	);
-	let candidate_a_hash = candidate_a.hash();
-	let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
-	let pending_availability = Vec::new();
 
-	let relay_parent_a_info = RelayChainBlockInfo {
-		number: pvd_a.relay_parent_number,
-		hash: relay_parent_a,
-		storage_root: pvd_a.relay_parent_storage_root,
-	};
-
-	let max_depth = 4;
-	storage.add_candidate(candidate_a, pvd_a).unwrap();
-	let scope = Scope::with_ancestors(
-		para_id,
-		relay_parent_a_info,
-		base_constraints,
-		pending_availability,
-		max_depth,
-		vec![],
-	)
-	.unwrap();
-	let tree = FragmentTree::populate(scope, &storage);
-
-	let candidates: Vec<_> = tree.candidates().collect();
-	assert_eq!(candidates.len(), 1);
-	assert_eq!(tree.nodes.len(), max_depth + 1);
-
-	assert_eq!(tree.nodes[0].parent, NodePointer::Root);
-	assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0));
-	assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1));
-	assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2));
-	assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3));
-
-	assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash);
-	assert_eq!(tree.nodes[1].candidate_hash, candidate_a_hash);
-	assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash);
-	assert_eq!(tree.nodes[3].candidate_hash, candidate_a_hash);
-	assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash);
+	// Requested count is 0.
+	assert_eq!(chain.find_backable_chain(ancestors, 0, |_| true), vec![]);
 
+	// Stop when we've found a candidate for which pred returns false.
+	let ancestors: Ancestors = [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()]
+		.into_iter()
+		.collect();
 	for count in 1..10 {
 		assert_eq!(
-			tree.find_backable_chain(Ancestors::new(), count, |_| true),
-			iter::repeat(candidate_a_hash)
-				.take(std::cmp::min(count as usize, max_depth + 1))
-				.collect::<Vec<_>>()
+			// Stop at 4.
+			chain.find_backable_chain(ancestors.clone(), count, |hash| hash !=
+				&candidates[4].hash()),
+			[3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
 		);
+	}
+
+	// Stop when we've found a candidate which is pending availability
+	{
+		let scope = Scope::with_ancestors(
+			para_id,
+			relay_parent_info.clone(),
+			base_constraints,
+			// Mark candidates[3] as pending availability
+			vec![PendingAvailability {
+				candidate_hash: candidates[3].hash(),
+				relay_parent: relay_parent_info,
+			}],
+			max_depth,
+			vec![],
+		)
+		.unwrap();
+		let chain = FragmentChain::populate(scope, &storage);
+		let ancestors: Ancestors =
+			[candidates[0].hash(), candidates[1].hash()].into_iter().collect();
 		assert_eq!(
-			tree.find_backable_chain([candidate_a_hash].into_iter().collect(), count - 1, |_| true),
-			iter::repeat(candidate_a_hash)
-				.take(std::cmp::min(count as usize - 1, max_depth))
-				.collect::<Vec<_>>()
+			// Stop at candidate 3, which is pending availability.
+			chain.find_backable_chain(ancestors.clone(), 3, |_| true),
+			[2].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
 		);
 	}
 }
 
 #[test]
-fn graceful_cycle_of_1() {
-	let mut storage = CandidateStorage::new();
+fn hypothetical_membership() {
+	let mut storage = CandidateStorage::default();
 
 	let para_id = ParaId::from(5u32);
 	let relay_parent_a = Hash::repeat_byte(1);
@@ -962,7 +1412,7 @@ fn graceful_cycle_of_1() {
 		relay_parent_a,
 		0,
 		vec![0x0a].into(),
-		vec![0x0b].into(), // input same as output
+		vec![0x0b].into(),
 		0,
 	);
 	let candidate_a_hash = candidate_a.hash();
@@ -972,13 +1422,12 @@ fn graceful_cycle_of_1() {
 		relay_parent_a,
 		0,
 		vec![0x0b].into(),
-		vec![0x0a].into(), // input same as output
+		vec![0x0c].into(),
 		0,
 	);
 	let candidate_b_hash = candidate_b.hash();
 
 	let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
-	let pending_availability = Vec::new();
 
 	let relay_parent_a_info = RelayChainBlockInfo {
 		number: pvd_a.relay_parent_number,
@@ -987,182 +1436,153 @@ fn graceful_cycle_of_1() {
 	};
 
 	let max_depth = 4;
-	storage.add_candidate(candidate_a, pvd_a).unwrap();
-	storage.add_candidate(candidate_b, pvd_b).unwrap();
+	storage.add_candidate(candidate_a, pvd_a, CandidateState::Seconded).unwrap();
+	storage.add_candidate(candidate_b, pvd_b, CandidateState::Seconded).unwrap();
 	let scope = Scope::with_ancestors(
 		para_id,
-		relay_parent_a_info,
-		base_constraints,
-		pending_availability,
+		relay_parent_a_info.clone(),
+		base_constraints.clone(),
+		vec![],
 		max_depth,
 		vec![],
 	)
 	.unwrap();
-	let tree = FragmentTree::populate(scope, &storage);
+	let chain = FragmentChain::populate(scope, &storage);
 
-	let candidates: Vec<_> = tree.candidates().collect();
-	assert_eq!(candidates.len(), 2);
-	assert_eq!(tree.nodes.len(), max_depth + 1);
+	assert_eq!(chain.to_vec().len(), 2);
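+	// The chain is A -> B (head data 0x0a -> 0x0b -> 0x0c).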
 
-	assert_eq!(tree.nodes[0].parent, NodePointer::Root);
-	assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0));
-	assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1));
-	assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2));
-	assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3));
+	// Check candidates which are already present
+	assert!(chain.hypothetical_membership(
+		HypotheticalCandidate::Incomplete {
+			parent_head_data_hash: HeadData::from(vec![0x0a]).hash(),
+			candidate_relay_parent: relay_parent_a,
+			candidate_para: para_id,
+			candidate_hash: candidate_a_hash,
+		},
+		&storage,
+	));
+	assert!(chain.hypothetical_membership(
+		HypotheticalCandidate::Incomplete {
+			parent_head_data_hash: HeadData::from(vec![0x0b]).hash(),
+			candidate_relay_parent: relay_parent_a,
+			candidate_para: para_id,
+			candidate_hash: candidate_b_hash,
+		},
+		&storage,
+	));
 
-	assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash);
-	assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash);
-	assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash);
-	assert_eq!(tree.nodes[3].candidate_hash, candidate_b_hash);
-	assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash);
+	// Forks not allowed.
+	assert!(!chain.hypothetical_membership(
+		HypotheticalCandidate::Incomplete {
+			parent_head_data_hash: HeadData::from(vec![0x0a]).hash(),
+			candidate_relay_parent: relay_parent_a,
+			candidate_para: para_id,
+			candidate_hash: CandidateHash(Hash::repeat_byte(21)),
+		},
+		&storage,
+	));
+	assert!(!chain.hypothetical_membership(
+		HypotheticalCandidate::Incomplete {
+			parent_head_data_hash: HeadData::from(vec![0x0b]).hash(),
+			candidate_relay_parent: relay_parent_a,
+			candidate_para: para_id,
+			candidate_hash: CandidateHash(Hash::repeat_byte(22)),
+		},
+		&storage,
+	));
 
-	assert_eq!(tree.find_backable_chain(Ancestors::new(), 1, |_| true), vec![candidate_a_hash],);
-	assert_eq!(
-		tree.find_backable_chain(Ancestors::new(), 2, |_| true),
-		vec![candidate_a_hash, candidate_b_hash],
-	);
-	assert_eq!(
-		tree.find_backable_chain(Ancestors::new(), 3, |_| true),
-		vec![candidate_a_hash, candidate_b_hash, candidate_a_hash],
-	);
-	assert_eq!(
-		tree.find_backable_chain([candidate_a_hash].into_iter().collect(), 2, |_| true),
-		vec![candidate_b_hash, candidate_a_hash],
-	);
+	// Unknown candidate which builds on top of the current chain.
+	assert!(chain.hypothetical_membership(
+		HypotheticalCandidate::Incomplete {
+			parent_head_data_hash: HeadData::from(vec![0x0c]).hash(),
+			candidate_relay_parent: relay_parent_a,
+			candidate_para: para_id,
+			candidate_hash: CandidateHash(Hash::repeat_byte(23)),
+		},
+		&storage,
+	));
 
-	assert_eq!(
-		tree.find_backable_chain(Ancestors::new(), 6, |_| true),
-		vec![
-			candidate_a_hash,
-			candidate_b_hash,
-			candidate_a_hash,
-			candidate_b_hash,
-			candidate_a_hash
-		],
-	);
+	// Unknown unconnected candidate which may be valid.
+	assert!(chain.hypothetical_membership(
+		HypotheticalCandidate::Incomplete {
+			parent_head_data_hash: HeadData::from(vec![0x0e]).hash(),
+			candidate_relay_parent: relay_parent_a,
+			candidate_para: para_id,
+			candidate_hash: CandidateHash(Hash::repeat_byte(23)),
+		},
+		&storage,
+	));
 
-	for count in 3..7 {
-		assert_eq!(
-			tree.find_backable_chain(
-				[candidate_a_hash, candidate_b_hash].into_iter().collect(),
-				count,
-				|_| true
-			),
-			vec![candidate_a_hash, candidate_b_hash, candidate_a_hash],
+	// The number of unconnected candidates is limited: (chain.len() + unconnected) <= max_depth
+	{
+		// C will be an unconnected candidate.
+		let (pvd_c, candidate_c) = make_committed_candidate(
+			para_id,
+			relay_parent_a,
+			0,
+			vec![0x0e].into(),
+			vec![0x0f].into(),
+			0,
 		);
-	}
-}
+		let candidate_c_hash = candidate_c.hash();
 
-#[test]
-fn hypothetical_depths_known_and_unknown() {
-	let mut storage = CandidateStorage::new();
-
-	let para_id = ParaId::from(5u32);
-	let relay_parent_a = Hash::repeat_byte(1);
-
-	let (pvd_a, candidate_a) = make_committed_candidate(
-		para_id,
-		relay_parent_a,
-		0,
-		vec![0x0a].into(),
-		vec![0x0b].into(), // input same as output
-		0,
-	);
-	let candidate_a_hash = candidate_a.hash();
-
-	let (pvd_b, candidate_b) = make_committed_candidate(
-		para_id,
-		relay_parent_a,
-		0,
-		vec![0x0b].into(),
-		vec![0x0a].into(), // input same as output
-		0,
-	);
-	let candidate_b_hash = candidate_b.hash();
-
-	let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
-	let pending_availability = Vec::new();
-
-	let relay_parent_a_info = RelayChainBlockInfo {
-		number: pvd_a.relay_parent_number,
-		hash: relay_parent_a,
-		storage_root: pvd_a.relay_parent_storage_root,
-	};
-
-	let max_depth = 4;
-	storage.add_candidate(candidate_a, pvd_a).unwrap();
-	storage.add_candidate(candidate_b, pvd_b).unwrap();
-	let scope = Scope::with_ancestors(
-		para_id,
-		relay_parent_a_info,
-		base_constraints,
-		pending_availability,
-		max_depth,
-		vec![],
-	)
-	.unwrap();
-	let tree = FragmentTree::populate(scope, &storage);
+		// Add an invalid candidate to the storage. This would introduce a fork; it's only here to
+		// test that it's ignored.
+		let (invalid_pvd, invalid_candidate) = make_committed_candidate(
+			para_id,
+			relay_parent_a,
+			1,
+			vec![0x0a].into(),
+			vec![0x0b].into(),
+			0,
+		);
 
-	let candidates: Vec<_> = tree.candidates().collect();
-	assert_eq!(candidates.len(), 2);
-	assert_eq!(tree.nodes.len(), max_depth + 1);
+		let scope = Scope::with_ancestors(
+			para_id,
+			relay_parent_a_info,
+			base_constraints,
+			vec![],
+			2,
+			vec![],
+		)
+		.unwrap();
+		let mut storage = storage.clone();
+		storage.add_candidate(candidate_c, pvd_c, CandidateState::Seconded).unwrap();
 
-	assert_eq!(
-		tree.hypothetical_depths(
-			candidate_a_hash,
-			HypotheticalCandidate::Incomplete {
-				parent_head_data_hash: HeadData::from(vec![0x0a]).hash(),
-				relay_parent: relay_parent_a,
-			},
-			&storage,
-			false,
-		),
-		vec![0, 2, 4],
-	);
+		let chain = FragmentChain::populate(scope, &storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]);
 
-	assert_eq!(
-		tree.hypothetical_depths(
-			candidate_b_hash,
-			HypotheticalCandidate::Incomplete {
-				parent_head_data_hash: HeadData::from(vec![0x0b]).hash(),
-				relay_parent: relay_parent_a,
-			},
-			&storage,
-			false,
-		),
-		vec![1, 3],
-	);
+		storage
+			.add_candidate(invalid_candidate, invalid_pvd, CandidateState::Seconded)
+			.unwrap();
 
-	assert_eq!(
-		tree.hypothetical_depths(
-			CandidateHash(Hash::repeat_byte(21)),
+		// C is not accepted as a potential unconnected candidate, due to the limit above.
+		assert!(!chain.hypothetical_membership(
 			HypotheticalCandidate::Incomplete {
-				parent_head_data_hash: HeadData::from(vec![0x0a]).hash(),
-				relay_parent: relay_parent_a,
+				parent_head_data_hash: HeadData::from(vec![0x0e]).hash(),
+				candidate_relay_parent: relay_parent_a,
+				candidate_hash: candidate_c_hash,
+				candidate_para: para_id
 			},
 			&storage,
-			false,
-		),
-		vec![0, 2, 4],
-	);
+		));
 
-	assert_eq!(
-		tree.hypothetical_depths(
-			CandidateHash(Hash::repeat_byte(22)),
+		// A candidate building on C is also rejected, since C is already an unconnected candidate
+		// in the storage.
+		assert!(!chain.hypothetical_membership(
 			HypotheticalCandidate::Incomplete {
-				parent_head_data_hash: HeadData::from(vec![0x0b]).hash(),
-				relay_parent: relay_parent_a,
+				parent_head_data_hash: HeadData::from(vec![0x0f]).hash(),
+				candidate_relay_parent: relay_parent_a,
+				candidate_para: para_id,
+				candidate_hash: CandidateHash(Hash::repeat_byte(23)),
 			},
 			&storage,
-			false,
-		),
-		vec![1, 3]
-	);
+		));
+	}
 }
 
 #[test]
-fn hypothetical_depths_stricter_on_complete() {
-	let storage = CandidateStorage::new();
+fn hypothetical_membership_stricter_on_complete_candidates() {
+	let storage = CandidateStorage::default();
 
 	let para_id = ParaId::from(5u32);
 	let relay_parent_a = Hash::repeat_byte(1);
@@ -1197,161 +1617,31 @@ fn hypothetical_depths_stricter_on_complete() {
 		vec![],
 	)
 	.unwrap();
-	let tree = FragmentTree::populate(scope, &storage);
-
-	assert_eq!(
-		tree.hypothetical_depths(
-			candidate_a_hash,
-			HypotheticalCandidate::Incomplete {
-				parent_head_data_hash: HeadData::from(vec![0x0a]).hash(),
-				relay_parent: relay_parent_a,
-			},
-			&storage,
-			false,
-		),
-		vec![0],
-	);
-
-	assert!(tree
-		.hypothetical_depths(
-			candidate_a_hash,
-			HypotheticalCandidate::Complete {
-				receipt: Cow::Owned(candidate_a),
-				persisted_validation_data: Cow::Owned(pvd_a),
-			},
-			&storage,
-			false,
-		)
-		.is_empty());
-}
-
-#[test]
-fn hypothetical_depths_backed_in_path() {
-	let mut storage = CandidateStorage::new();
-
-	let para_id = ParaId::from(5u32);
-	let relay_parent_a = Hash::repeat_byte(1);
-
-	let (pvd_a, candidate_a) = make_committed_candidate(
-		para_id,
-		relay_parent_a,
-		0,
-		vec![0x0a].into(),
-		vec![0x0b].into(),
-		0,
-	);
-	let candidate_a_hash = candidate_a.hash();
-
-	let (pvd_b, candidate_b) = make_committed_candidate(
-		para_id,
-		relay_parent_a,
-		0,
-		vec![0x0b].into(),
-		vec![0x0c].into(),
-		0,
-	);
-	let candidate_b_hash = candidate_b.hash();
-
-	let (pvd_c, candidate_c) = make_committed_candidate(
-		para_id,
-		relay_parent_a,
-		0,
-		vec![0x0b].into(),
-		vec![0x0d].into(),
-		0,
-	);
-
-	let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
-	let pending_availability = Vec::new();
-
-	let relay_parent_a_info = RelayChainBlockInfo {
-		number: pvd_a.relay_parent_number,
-		hash: relay_parent_a,
-		storage_root: pvd_a.relay_parent_storage_root,
-	};
-
-	let max_depth = 4;
-	storage.add_candidate(candidate_a, pvd_a).unwrap();
-	storage.add_candidate(candidate_b, pvd_b).unwrap();
-	storage.add_candidate(candidate_c, pvd_c).unwrap();
-
-	// `A` and `B` are backed, `C` is not.
-	storage.mark_backed(&candidate_a_hash);
-	storage.mark_backed(&candidate_b_hash);
-
-	let scope = Scope::with_ancestors(
-		para_id,
-		relay_parent_a_info,
-		base_constraints,
-		pending_availability,
-		max_depth,
-		vec![],
-	)
-	.unwrap();
-	let tree = FragmentTree::populate(scope, &storage);
-
-	let candidates: Vec<_> = tree.candidates().collect();
-	assert_eq!(candidates.len(), 3);
-	assert_eq!(tree.nodes.len(), 3);
-
-	let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA));
-
-	assert_eq!(
-		tree.hypothetical_depths(
-			candidate_d_hash,
-			HypotheticalCandidate::Incomplete {
-				parent_head_data_hash: HeadData::from(vec![0x0a]).hash(),
-				relay_parent: relay_parent_a,
-			},
-			&storage,
-			true,
-		),
-		vec![0],
-	);
-
-	assert_eq!(
-		tree.hypothetical_depths(
-			candidate_d_hash,
-			HypotheticalCandidate::Incomplete {
-				parent_head_data_hash: HeadData::from(vec![0x0c]).hash(),
-				relay_parent: relay_parent_a,
-			},
-			&storage,
-			true,
-		),
-		vec![2],
-	);
-
-	assert_eq!(
-		tree.hypothetical_depths(
-			candidate_d_hash,
-			HypotheticalCandidate::Incomplete {
-				parent_head_data_hash: HeadData::from(vec![0x0d]).hash(),
-				relay_parent: relay_parent_a,
-			},
-			&storage,
-			true,
-		),
-		Vec::<usize>::new(),
-	);
+	let chain = FragmentChain::populate(scope, &storage);
+
+	assert!(chain.hypothetical_membership(
+		HypotheticalCandidate::Incomplete {
+			parent_head_data_hash: HeadData::from(vec![0x0a]).hash(),
+			candidate_relay_parent: relay_parent_a,
+			candidate_para: para_id,
+			candidate_hash: candidate_a_hash,
+		},
+		&storage,
+	));
 
-	assert_eq!(
-		tree.hypothetical_depths(
-			candidate_d_hash,
-			HypotheticalCandidate::Incomplete {
-				parent_head_data_hash: HeadData::from(vec![0x0d]).hash(),
-				relay_parent: relay_parent_a,
-			},
-			&storage,
-			false,
-		),
-		vec![2], // non-empty if `false`.
-	);
+	assert!(!chain.hypothetical_membership(
+		HypotheticalCandidate::Complete {
+			receipt: Arc::new(candidate_a),
+			persisted_validation_data: pvd_a,
+			candidate_hash: candidate_a_hash,
+		},
+		&storage,
+	));
 }
 
 #[test]
-fn pending_availability_in_scope() {
-	let mut storage = CandidateStorage::new();
+fn hypothetical_membership_with_pending_availability_in_scope() {
+	let mut storage = CandidateStorage::default();
 
 	let para_id = ParaId::from(5u32);
 	let relay_parent_a = Hash::repeat_byte(1);
@@ -1402,8 +1692,8 @@ fn pending_availability_in_scope() {
 	};
 
 	let max_depth = 4;
-	storage.add_candidate(candidate_a, pvd_a).unwrap();
-	storage.add_candidate(candidate_b, pvd_b).unwrap();
+	storage.add_candidate(candidate_a, pvd_a, CandidateState::Seconded).unwrap();
+	storage.add_candidate(candidate_b, pvd_b, CandidateState::Backed).unwrap();
 	storage.mark_backed(&candidate_a_hash);
 
 	let scope = Scope::with_ancestors(
@@ -1415,37 +1705,49 @@ fn pending_availability_in_scope() {
 		vec![relay_parent_b_info],
 	)
 	.unwrap();
-	let tree = FragmentTree::populate(scope, &storage);
+	let chain = FragmentChain::populate(scope, &storage);
 
-	let candidates: Vec<_> = tree.candidates().collect();
-	assert_eq!(candidates.len(), 2);
-	assert_eq!(tree.nodes.len(), 2);
+	assert_eq!(chain.to_vec().len(), 2);
 
 	let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA));
 
-	assert_eq!(
-		tree.hypothetical_depths(
-			candidate_d_hash,
-			HypotheticalCandidate::Incomplete {
-				parent_head_data_hash: HeadData::from(vec![0x0b]).hash(),
-				relay_parent: relay_parent_c,
-			},
-			&storage,
-			false,
-		),
-		vec![1],
-	);
+	assert!(chain.hypothetical_membership(
+		HypotheticalCandidate::Incomplete {
+			parent_head_data_hash: HeadData::from(vec![0x0a]).hash(),
+			candidate_relay_parent: relay_parent_a,
+			candidate_hash: candidate_a_hash,
+			candidate_para: para_id
+		},
+		&storage,
+	));
 
-	assert_eq!(
-		tree.hypothetical_depths(
-			candidate_d_hash,
-			HypotheticalCandidate::Incomplete {
-				parent_head_data_hash: HeadData::from(vec![0x0c]).hash(),
-				relay_parent: relay_parent_b,
-			},
-			&storage,
-			false,
-		),
-		vec![2],
-	);
+	assert!(!chain.hypothetical_membership(
+		HypotheticalCandidate::Incomplete {
+			parent_head_data_hash: HeadData::from(vec![0x0a]).hash(),
+			candidate_relay_parent: relay_parent_c,
+			candidate_para: para_id,
+			candidate_hash: candidate_d_hash,
+		},
+		&storage,
+	));
+
+	assert!(!chain.hypothetical_membership(
+		HypotheticalCandidate::Incomplete {
+			parent_head_data_hash: HeadData::from(vec![0x0b]).hash(),
+			candidate_relay_parent: relay_parent_c,
+			candidate_para: para_id,
+			candidate_hash: candidate_d_hash,
+		},
+		&storage,
+	));
+
+	assert!(chain.hypothetical_membership(
+		HypotheticalCandidate::Incomplete {
+			parent_head_data_hash: HeadData::from(vec![0x0c]).hash(),
+			candidate_relay_parent: relay_parent_b,
+			candidate_para: para_id,
+			candidate_hash: candidate_d_hash,
+		},
+		&storage,
+	));
 }
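
Note on the API shape exercised by these tests: the old `hypothetical_depths` call returned a vector of depths per active leaf, whereas `hypothetical_membership` returns a single bool per (candidate, chain) pair, so per-leaf membership becomes the set of leaves whose chain answers true. A minimal sketch of that pattern follows (not part of the patch), assuming the `FragmentChain`, `CandidateStorage` and `HypotheticalCandidate` types used in the tests above; the helper name and the map of chains are hypothetical:

	fn leaves_with_hypothetical_membership(
		chains: &std::collections::HashMap<Hash, FragmentChain>,
		storage: &CandidateStorage,
		candidate: &HypotheticalCandidate,
	) -> Vec<Hash> {
		chains
			.iter()
			// `hypothetical_membership` takes the candidate by value, hence the clone.
			.filter(|(_, chain)| chain.hypothetical_membership(candidate.clone(), storage))
			.map(|(leaf, _)| *leaf)
			.collect()
	}

This mirrors what `answer_hypothetical_membership_request` does in the lib.rs changes below, except that there each para id selects its own fragment chain and candidate storage.
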
diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs
index 0b1a2e034a2893e6397b986405df28a91776d819..d5bb5ff76ba8e8e603f2ab6b915b6c8fdb2d5f3c 100644
--- a/polkadot/node/core/prospective-parachains/src/lib.rs
+++ b/polkadot/node/core/prospective-parachains/src/lib.rs
@@ -21,22 +21,20 @@
 //! This is the main coordinator of work within the node for the collation and
 //! backing phases of parachain consensus.
 //!
-//! This is primarily an implementation of "Fragment Trees", as described in
+//! This is primarily an implementation of "Fragment Chains", as described in
 //! [`polkadot_node_subsystem_util::inclusion_emulator`].
 //!
 //! This subsystem also handles concerns such as the relay-chain being forkful and session changes.
 
-use std::{
-	borrow::Cow,
-	collections::{HashMap, HashSet},
-};
+use std::collections::{HashMap, HashSet};
 
+use fragment_chain::{FragmentChain, PotentialAddition};
 use futures::{channel::oneshot, prelude::*};
 
 use polkadot_node_subsystem::{
 	messages::{
-		Ancestors, ChainApiMessage, FragmentTreeMembership, HypotheticalCandidate,
-		HypotheticalFrontierRequest, IntroduceCandidateRequest, ParentHeadData,
+		Ancestors, ChainApiMessage, HypotheticalCandidate, HypotheticalMembership,
+		HypotheticalMembershipRequest, IntroduceSecondedCandidateRequest, ParentHeadData,
 		ProspectiveParachainsMessage, ProspectiveValidationDataRequest, RuntimeApiMessage,
 		RuntimeApiRequest,
 	},
@@ -56,7 +54,8 @@ use polkadot_primitives::{
 use crate::{
 	error::{FatalError, FatalResult, JfyiError, JfyiErrorResult, Result},
 	fragment_chain::{
-		CandidateStorage, CandidateStorageInsertionError, FragmentTree, Scope as TreeScope,
+		CandidateState, CandidateStorage, CandidateStorageInsertionError,
+		Scope as FragmentChainScope,
 	},
 };
 
@@ -72,7 +71,7 @@ const LOG_TARGET: &str = "parachain::prospective-parachains";
 
 struct RelayBlockViewData {
 	// Scheduling info for paras and upcoming paras.
-	fragment_trees: HashMap<ParaId, FragmentTree>,
+	fragment_chains: HashMap<ParaId, FragmentChain>,
 	pending_availability: HashSet<CandidateHash>,
 }
 
@@ -141,12 +140,10 @@ async fn run_iteration<Context>(
 			},
 			FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {},
 			FromOrchestra::Communication { msg } => match msg {
-				ProspectiveParachainsMessage::IntroduceCandidate(request, tx) =>
-					handle_candidate_introduced(&mut *ctx, view, request, tx).await?,
-				ProspectiveParachainsMessage::CandidateSeconded(para, candidate_hash) =>
-					handle_candidate_seconded(view, para, candidate_hash),
+				ProspectiveParachainsMessage::IntroduceSecondedCandidate(request, tx) =>
+					handle_introduce_seconded_candidate(&mut *ctx, view, request, tx, metrics).await,
 				ProspectiveParachainsMessage::CandidateBacked(para, candidate_hash) =>
-					handle_candidate_backed(&mut *ctx, view, para, candidate_hash).await?,
+					handle_candidate_backed(&mut *ctx, view, para, candidate_hash).await,
 				ProspectiveParachainsMessage::GetBackableCandidates(
 					relay_parent,
 					para,
@@ -154,10 +151,8 @@ async fn run_iteration<Context>(
 					ancestors,
 					tx,
 				) => answer_get_backable_candidates(&view, relay_parent, para, count, ancestors, tx),
-				ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx) =>
-					answer_hypothetical_frontier_request(&view, request, tx),
-				ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) =>
-					answer_tree_membership_request(&view, para, candidate, tx),
+				ProspectiveParachainsMessage::GetHypotheticalMembership(request, tx) =>
+					answer_hypothetical_membership_request(&view, request, tx, metrics),
 				ProspectiveParachainsMessage::GetMinimumRelayParents(relay_parent, tx) =>
 					answer_minimum_relay_parents_request(&view, relay_parent, tx),
 				ProspectiveParachainsMessage::GetProspectiveValidationData(request, tx) =>
@@ -175,8 +170,8 @@ async fn handle_active_leaves_update<Context>(
 	metrics: &Metrics,
 ) -> JfyiErrorResult<()> {
 	// 1. clean up inactive leaves
-	// 2. determine all scheduled para at new block
-	// 3. construct new fragment tree for each para for each new leaf
+	// 2. determine all scheduled paras at the new block
+	// 3. construct new fragment chain for each para for each new leaf
 	// 4. prune candidate storage.
 
 	for deactivated in &update.deactivated {
@@ -203,9 +198,7 @@ async fn handle_active_leaves_update<Context>(
 			return Ok(())
 		};
 
-		let mut pending_availability = HashSet::new();
-		let scheduled_paras =
-			fetch_upcoming_paras(&mut *ctx, hash, &mut pending_availability).await?;
+		let scheduled_paras = fetch_upcoming_paras(&mut *ctx, hash).await?;
 
 		let block_info: RelayChainBlockInfo =
 			match fetch_block_info(&mut *ctx, &mut temp_header_cache, hash).await? {
@@ -227,30 +220,30 @@ async fn handle_active_leaves_update<Context>(
 		let ancestry =
 			fetch_ancestry(&mut *ctx, &mut temp_header_cache, hash, allowed_ancestry_len).await?;
 
+		let mut all_pending_availability = HashSet::new();
+
 		// Find constraints.
-		let mut fragment_trees = HashMap::new();
+		let mut fragment_chains = HashMap::new();
 		for para in scheduled_paras {
 			let candidate_storage =
-				view.candidate_storage.entry(para).or_insert_with(CandidateStorage::new);
+				view.candidate_storage.entry(para).or_insert_with(CandidateStorage::default);
 
 			let backing_state = fetch_backing_state(&mut *ctx, hash, para).await?;
 
-			let (constraints, pending_availability) = match backing_state {
-				Some(c) => c,
-				None => {
-					// This indicates a runtime conflict of some kind.
-
-					gum::debug!(
-						target: LOG_TARGET,
-						para_id = ?para,
-						relay_parent = ?hash,
-						"Failed to get inclusion backing state."
-					);
+			let Some((constraints, pending_availability)) = backing_state else {
+				// This indicates a runtime conflict of some kind.
+				gum::debug!(
+					target: LOG_TARGET,
+					para_id = ?para,
+					relay_parent = ?hash,
+					"Failed to get inclusion backing state."
+				);
 
-					continue
-				},
+				continue
 			};
 
+			all_pending_availability.extend(pending_availability.iter().map(|c| c.candidate_hash));
+
 			let pending_availability = preprocess_candidates_pending_availability(
 				ctx,
 				&mut temp_header_cache,
@@ -261,15 +254,15 @@ async fn handle_active_leaves_update<Context>(
 			let mut compact_pending = Vec::with_capacity(pending_availability.len());
 
 			for c in pending_availability {
-				let res = candidate_storage.add_candidate(c.candidate, c.persisted_validation_data);
+				let res = candidate_storage.add_candidate(
+					c.candidate,
+					c.persisted_validation_data,
+					CandidateState::Backed,
+				);
 				let candidate_hash = c.compact.candidate_hash;
-				compact_pending.push(c.compact);
 
 				match res {
-					Ok(_) | Err(CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => {
-						// Anything on-chain is guaranteed to be backed.
-						candidate_storage.mark_backed(&candidate_hash);
-					},
+					Ok(_) | Err(CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => {},
 					Err(err) => {
 						gum::warn!(
 							target: LOG_TARGET,
@@ -278,11 +271,15 @@ async fn handle_active_leaves_update<Context>(
 							?err,
 							"Scraped invalid candidate pending availability",
 						);
+
+						break
 					},
 				}
+
+				compact_pending.push(c.compact);
 			}
 
-			let scope = TreeScope::with_ancestors(
+			let scope = FragmentChainScope::with_ancestors(
 				para,
 				block_info.clone(),
 				constraints,
@@ -297,16 +294,26 @@ async fn handle_active_leaves_update<Context>(
 				relay_parent = ?hash,
 				min_relay_parent = scope.earliest_relay_parent().number,
 				para_id = ?para,
-				"Creating fragment tree"
+				"Creating fragment chain"
 			);
 
-			let tree = FragmentTree::populate(scope, &*candidate_storage);
+			let chain = FragmentChain::populate(scope, &*candidate_storage);
+
+			gum::trace!(
+				target: LOG_TARGET,
+				relay_parent = ?hash,
+				para_id = ?para,
+				"Populated fragment chain with {} candidates",
+				chain.len()
+			);
 
-			fragment_trees.insert(para, tree);
+			fragment_chains.insert(para, chain);
 		}
 
-		view.active_leaves
-			.insert(hash, RelayBlockViewData { fragment_trees, pending_availability });
+		view.active_leaves.insert(
+			hash,
+			RelayBlockViewData { fragment_chains, pending_availability: all_pending_availability },
+		);
 	}
 
 	if !update.deactivated.is_empty() {
@@ -318,18 +325,39 @@ async fn handle_active_leaves_update<Context>(
 }
 
 fn prune_view_candidate_storage(view: &mut View, metrics: &Metrics) {
-	metrics.time_prune_view_candidate_storage();
+	let _timer = metrics.time_prune_view_candidate_storage();
 
 	let active_leaves = &view.active_leaves;
 	let mut live_candidates = HashSet::new();
 	let mut live_paras = HashSet::new();
 	for sub_view in active_leaves.values() {
-		for (para_id, fragment_tree) in &sub_view.fragment_trees {
-			live_candidates.extend(fragment_tree.candidates());
+		live_candidates.extend(sub_view.pending_availability.iter().cloned());
+
+		for (para_id, fragment_chain) in &sub_view.fragment_chains {
+			live_candidates.extend(fragment_chain.to_vec());
 			live_paras.insert(*para_id);
 		}
+	}
 
-		live_candidates.extend(sub_view.pending_availability.iter().cloned());
+	let connected_candidates_count = live_candidates.len();
+	for (leaf, sub_view) in active_leaves.iter() {
+		for (para_id, fragment_chain) in &sub_view.fragment_chains {
+			if let Some(storage) = view.candidate_storage.get(para_id) {
+				let unconnected_potential =
+					fragment_chain.find_unconnected_potential_candidates(storage, None);
+				if !unconnected_potential.is_empty() {
+					gum::trace!(
+						target: LOG_TARGET,
+						?leaf,
+						"Keeping {} unconnected candidates for paraid {} in storage: {:?}",
+						unconnected_potential.len(),
+						para_id,
+						unconnected_potential
+					);
+				}
+				live_candidates.extend(unconnected_potential);
+			}
+		}
 	}
 
 	view.candidate_storage.retain(|para_id, storage| {
@@ -343,7 +371,21 @@ fn prune_view_candidate_storage(view: &mut View, metrics: &Metrics) {
 		// This maintains a convenient invariant that para-id storage exists
 		// as long as there's an active head which schedules the para.
 		true
-	})
+	});
+
+	for (para_id, storage) in view.candidate_storage.iter() {
+		gum::trace!(
+			target: LOG_TARGET,
+			"Keeping a total of {} connected candidates for paraid {} in storage",
+			storage.candidates().count(),
+			para_id,
+		);
+	}
+
+	metrics.record_candidate_storage_size(
+		connected_candidates_count as u64,
+		live_candidates.len().saturating_sub(connected_candidates_count) as u64,
+	);
 }
 
 struct ImportablePendingAvailability {
@@ -365,22 +407,20 @@ async fn preprocess_candidates_pending_availability<Context>(
 	let expected_count = pending_availability.len();
 
 	for (i, pending) in pending_availability.into_iter().enumerate() {
-		let relay_parent =
-			match fetch_block_info(ctx, cache, pending.descriptor.relay_parent).await? {
-				None => {
-					gum::debug!(
-						target: LOG_TARGET,
-						?pending.candidate_hash,
-						?pending.descriptor.para_id,
-						index = ?i,
-						?expected_count,
-						"Had to stop processing pending candidates early due to missing info.",
-					);
+		let Some(relay_parent) =
+			fetch_block_info(ctx, cache, pending.descriptor.relay_parent).await?
+		else {
+			gum::debug!(
+				target: LOG_TARGET,
+				?pending.candidate_hash,
+				?pending.descriptor.para_id,
+				index = ?i,
+				?expected_count,
+				"Had to stop processing pending candidates early due to missing info.",
+			);
 
-					break
-				},
-				Some(b) => b,
-			};
+			break
+		};
 
 		let next_required_parent = pending.commitments.head_data.clone();
 		importable.push(ImportablePendingAvailability {
@@ -407,104 +447,139 @@ async fn preprocess_candidates_pending_availability<Context>(
 }
 
 #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
-async fn handle_candidate_introduced<Context>(
+async fn handle_introduce_seconded_candidate<Context>(
 	_ctx: &mut Context,
 	view: &mut View,
-	request: IntroduceCandidateRequest,
-	tx: oneshot::Sender<FragmentTreeMembership>,
-) -> JfyiErrorResult<()> {
-	let IntroduceCandidateRequest {
+	request: IntroduceSecondedCandidateRequest,
+	tx: oneshot::Sender<bool>,
+	metrics: &Metrics,
+) {
+	let _timer = metrics.time_introduce_seconded_candidate();
+
+	let IntroduceSecondedCandidateRequest {
 		candidate_para: para,
 		candidate_receipt: candidate,
 		persisted_validation_data: pvd,
 	} = request;
 
-	// Add the candidate to storage.
-	// Then attempt to add it to all trees.
-	let storage = match view.candidate_storage.get_mut(&para) {
-		None => {
-			gum::warn!(
-				target: LOG_TARGET,
-				para_id = ?para,
-				candidate_hash = ?candidate.hash(),
-				"Received seconded candidate for inactive para",
-			);
+	let Some(storage) = view.candidate_storage.get_mut(&para) else {
+		gum::warn!(
+			target: LOG_TARGET,
+			para_id = ?para,
+			candidate_hash = ?candidate.hash(),
+			"Received seconded candidate for inactive para",
+		);
 
-			let _ = tx.send(Vec::new());
-			return Ok(())
-		},
-		Some(storage) => storage,
+		let _ = tx.send(false);
+		return
 	};
 
-	let candidate_hash = match storage.add_candidate(candidate, pvd) {
-		Ok(c) => c,
-		Err(CandidateStorageInsertionError::CandidateAlreadyKnown(c)) => {
-			// Candidate known - return existing fragment tree membership.
-			let _ = tx.send(fragment_tree_membership(&view.active_leaves, para, c));
-			return Ok(())
-		},
-		Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) => {
-			// We can't log the candidate hash without either doing more ~expensive
-			// hashing but this branch indicates something is seriously wrong elsewhere
-			// so it's doubtful that it would affect debugging.
+	let parent_head_hash = pvd.parent_head.hash();
+	let output_head_hash = Some(candidate.commitments.head_data.hash());
+
+	// We first introduce the candidate in the storage and then try to extend the chain.
+	// If the candidate gets included in the chain, we can keep it in storage.
+	// If it doesn't, check that it's still a potential candidate in at least one fragment chain.
+	// If it's not, we can remove it.
+
+	let candidate_hash =
+		match storage.add_candidate(candidate.clone(), pvd, CandidateState::Seconded) {
+			Ok(c) => c,
+			Err(CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => {
+				gum::debug!(
+					target: LOG_TARGET,
+					para = ?para,
+					"Attempting to introduce an already known candidate: {:?}",
+					candidate.hash()
+				);
+				// Candidate already known.
+				let _ = tx.send(true);
+				return
+			},
+			Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) => {
+				// We can't log the candidate hash without doing more ~expensive hashing, but
+				// this branch indicates something is seriously wrong elsewhere, so it's
+				// doubtful that omitting it would affect debugging.
 
-			gum::warn!(
+				gum::warn!(
+					target: LOG_TARGET,
+					para = ?para,
+					"Received seconded candidate had mismatching validation data",
+				);
+
+				let _ = tx.send(false);
+				return
+			},
+		};
+
+	let mut keep_in_storage = false;
+	for (relay_parent, leaf_data) in view.active_leaves.iter_mut() {
+		if let Some(chain) = leaf_data.fragment_chains.get_mut(&para) {
+			gum::trace!(
 				target: LOG_TARGET,
 				para = ?para,
-				"Received seconded candidate had mismatching validation data",
+				?relay_parent,
+				"Candidates in chain before trying to introduce a new one: {:?}",
+				chain.to_vec()
 			);
+			chain.extend_from_storage(&*storage);
+			if chain.contains_candidate(&candidate_hash) {
+				keep_in_storage = true;
 
-			let _ = tx.send(Vec::new());
-			return Ok(())
-		},
-	};
+				gum::trace!(
+					target: LOG_TARGET,
+					?relay_parent,
+					para = ?para,
+					?candidate_hash,
+					"Added candidate to chain.",
+				);
+			} else {
+				match chain.can_add_candidate_as_potential(
+					&storage,
+					&candidate_hash,
+					&candidate.descriptor.relay_parent,
+					parent_head_hash,
+					output_head_hash,
+				) {
+					PotentialAddition::Anyhow => {
+						gum::trace!(
+							target: LOG_TARGET,
+							para = ?para,
+							?relay_parent,
+							?candidate_hash,
+							"Kept candidate as unconnected potential.",
+						);
 
-	let mut membership = Vec::new();
-	for (relay_parent, leaf_data) in &mut view.active_leaves {
-		if let Some(tree) = leaf_data.fragment_trees.get_mut(&para) {
-			tree.add_and_populate(candidate_hash, &*storage);
-			if let Some(depths) = tree.candidate(&candidate_hash) {
-				membership.push((*relay_parent, depths));
+						keep_in_storage = true;
+					},
+					_ => {
+						gum::trace!(
+							target: LOG_TARGET,
+							para = ?para,
+							?relay_parent,
+							"Not introducing a new candidate: {:?}",
+							candidate_hash
+						);
+					},
+				}
 			}
 		}
 	}
 
-	if membership.is_empty() {
+	// If there is at least one leaf where this candidate can be added or potentially added in the
+	// future, keep it in storage.
+	if !keep_in_storage {
 		storage.remove_candidate(&candidate_hash);
-	}
-
-	let _ = tx.send(membership);
-
-	Ok(())
-}
-
-fn handle_candidate_seconded(view: &mut View, para: ParaId, candidate_hash: CandidateHash) {
-	let storage = match view.candidate_storage.get_mut(&para) {
-		None => {
-			gum::warn!(
-				target: LOG_TARGET,
-				para_id = ?para,
-				?candidate_hash,
-				"Received instruction to second unknown candidate",
-			);
 
-			return
-		},
-		Some(storage) => storage,
-	};
-
-	if !storage.contains(&candidate_hash) {
-		gum::warn!(
+		gum::debug!(
 			target: LOG_TARGET,
-			para_id = ?para,
-			?candidate_hash,
-			"Received instruction to second unknown candidate",
+			para = ?para,
+			candidate = ?candidate_hash,
+			"Newly-seconded candidate cannot be kept under any active leaf",
 		);
-
-		return
 	}
 
-	storage.mark_seconded(&candidate_hash);
+	let _ = tx.send(keep_in_storage);
 }
 
 #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
@@ -513,19 +588,16 @@ async fn handle_candidate_backed<Context>(
 	view: &mut View,
 	para: ParaId,
 	candidate_hash: CandidateHash,
-) -> JfyiErrorResult<()> {
-	let storage = match view.candidate_storage.get_mut(&para) {
-		None => {
-			gum::warn!(
-				target: LOG_TARGET,
-				para_id = ?para,
-				?candidate_hash,
-				"Received instruction to back unknown candidate",
-			);
+) {
+	let Some(storage) = view.candidate_storage.get_mut(&para) else {
+		gum::warn!(
+			target: LOG_TARGET,
+			para_id = ?para,
+			?candidate_hash,
+			"Received instruction to back a candidate for unscheduled para",
+		);
 
-			return Ok(())
-		},
-		Some(storage) => storage,
+		return
 	};
 
 	if !storage.contains(&candidate_hash) {
@@ -536,7 +608,7 @@ async fn handle_candidate_backed<Context>(
 			"Received instruction to back unknown candidate",
 		);
 
-		return Ok(())
+		return
 	}
 
 	if storage.is_backed(&candidate_hash) {
@@ -547,11 +619,10 @@ async fn handle_candidate_backed<Context>(
 			"Received redundant instruction to mark candidate as backed",
 		);
 
-		return Ok(())
+		return
 	}
 
 	storage.mark_backed(&candidate_hash);
-	Ok(())
 }
 
 fn answer_get_backable_candidates(
@@ -562,62 +633,71 @@ fn answer_get_backable_candidates(
 	ancestors: Ancestors,
 	tx: oneshot::Sender<Vec<(CandidateHash, Hash)>>,
 ) {
-	let data = match view.active_leaves.get(&relay_parent) {
-		None => {
-			gum::debug!(
-				target: LOG_TARGET,
-				?relay_parent,
-				para_id = ?para,
-				"Requested backable candidate for inactive relay-parent."
-			);
+	let Some(data) = view.active_leaves.get(&relay_parent) else {
+		gum::debug!(
+			target: LOG_TARGET,
+			?relay_parent,
+			para_id = ?para,
+			"Requested backable candidate for inactive relay-parent."
+		);
 
-			let _ = tx.send(vec![]);
-			return
-		},
-		Some(d) => d,
+		let _ = tx.send(vec![]);
+		return
 	};
 
-	let tree = match data.fragment_trees.get(&para) {
-		None => {
-			gum::debug!(
-				target: LOG_TARGET,
-				?relay_parent,
-				para_id = ?para,
-				"Requested backable candidate for inactive para."
-			);
+	let Some(chain) = data.fragment_chains.get(&para) else {
+		gum::debug!(
+			target: LOG_TARGET,
+			?relay_parent,
+			para_id = ?para,
+			"Requested backable candidate for inactive para."
+		);
 
-			let _ = tx.send(vec![]);
-			return
-		},
-		Some(tree) => tree,
+		let _ = tx.send(vec![]);
+		return
 	};
 
-	let storage = match view.candidate_storage.get(&para) {
-		None => {
-			gum::warn!(
-				target: LOG_TARGET,
-				?relay_parent,
-				para_id = ?para,
-				"No candidate storage for active para",
-			);
+	let Some(storage) = view.candidate_storage.get(&para) else {
+		gum::warn!(
+			target: LOG_TARGET,
+			?relay_parent,
+			para_id = ?para,
+			"No candidate storage for active para",
+		);
 
-			let _ = tx.send(vec![]);
-			return
-		},
-		Some(s) => s,
+		let _ = tx.send(vec![]);
+		return
 	};
 
-	let backable_candidates: Vec<_> = tree
+	gum::trace!(
+		target: LOG_TARGET,
+		?relay_parent,
+		para_id = ?para,
+		"Candidate storage for para: {:?}",
+		storage.candidates().map(|candidate| candidate.hash()).collect::<Vec<_>>()
+	);
+
+	gum::trace!(
+		target: LOG_TARGET,
+		?relay_parent,
+		para_id = ?para,
+		"Candidate chain for para: {:?}",
+		chain.to_vec()
+	);
+
+	let backable_candidates: Vec<_> = chain
 		.find_backable_chain(ancestors.clone(), count, |candidate| storage.is_backed(candidate))
 		.into_iter()
 		.filter_map(|child_hash| {
-			storage.relay_parent_by_candidate_hash(&child_hash).map_or_else(
+			storage.relay_parent_of_candidate(&child_hash).map_or_else(
 				|| {
+					// Strictly speaking, we'd need to trim all of the candidates that follow,
+					// but the runtime will do that anyway. This scenario should be impossible.
 					gum::error!(
 						target: LOG_TARGET,
 						?child_hash,
 						para_id = ?para,
-						"Candidate is present in fragment tree but not in candidate's storage!",
+						"Candidate is present in fragment chain but not in candidate's storage!",
 					);
 					None
 				},
@@ -639,6 +719,7 @@ fn answer_get_backable_candidates(
 			target: LOG_TARGET,
 			?relay_parent,
 			?backable_candidates,
+			?ancestors,
 			"Found backable candidates",
 		);
 	}
@@ -646,58 +727,32 @@ fn answer_get_backable_candidates(
 	let _ = tx.send(backable_candidates);
 }
 
-fn answer_hypothetical_frontier_request(
+fn answer_hypothetical_membership_request(
 	view: &View,
-	request: HypotheticalFrontierRequest,
-	tx: oneshot::Sender<Vec<(HypotheticalCandidate, FragmentTreeMembership)>>,
+	request: HypotheticalMembershipRequest,
+	tx: oneshot::Sender<Vec<(HypotheticalCandidate, HypotheticalMembership)>>,
+	metrics: &Metrics,
 ) {
+	let _timer = metrics.time_hypothetical_membership_request();
+
 	let mut response = Vec::with_capacity(request.candidates.len());
 	for candidate in request.candidates {
-		response.push((candidate, Vec::new()));
+		response.push((candidate, vec![]));
 	}
 
-	let required_active_leaf = request.fragment_tree_relay_parent;
+	let required_active_leaf = request.fragment_chain_relay_parent;
 	for (active_leaf, leaf_view) in view
 		.active_leaves
 		.iter()
 		.filter(|(h, _)| required_active_leaf.as_ref().map_or(true, |x| h == &x))
 	{
-		for &mut (ref c, ref mut membership) in &mut response {
-			let fragment_tree = match leaf_view.fragment_trees.get(&c.candidate_para()) {
-				None => continue,
-				Some(f) => f,
-			};
-			let candidate_storage = match view.candidate_storage.get(&c.candidate_para()) {
-				None => continue,
-				Some(storage) => storage,
-			};
-
-			let candidate_hash = c.candidate_hash();
-			let hypothetical = match c {
-				HypotheticalCandidate::Complete { receipt, persisted_validation_data, .. } =>
-					fragment_chain::HypotheticalCandidate::Complete {
-						receipt: Cow::Borrowed(receipt),
-						persisted_validation_data: Cow::Borrowed(persisted_validation_data),
-					},
-				HypotheticalCandidate::Incomplete {
-					parent_head_data_hash,
-					candidate_relay_parent,
-					..
-				} => fragment_chain::HypotheticalCandidate::Incomplete {
-					relay_parent: *candidate_relay_parent,
-					parent_head_data_hash: *parent_head_data_hash,
-				},
-			};
-
-			let depths = fragment_tree.hypothetical_depths(
-				candidate_hash,
-				hypothetical,
-				candidate_storage,
-				request.backed_in_path_only,
-			);
+		for &mut (ref candidate, ref mut membership) in &mut response {
+			let para_id = &candidate.candidate_para();
+			let Some(fragment_chain) = leaf_view.fragment_chains.get(para_id) else { continue };
+			let Some(candidate_storage) = view.candidate_storage.get(para_id) else { continue };
 
-			if !depths.is_empty() {
-				membership.push((*active_leaf, depths));
+			if fragment_chain.hypothetical_membership(candidate.clone(), candidate_storage) {
+				membership.push(*active_leaf);
 			}
 		}
 	}
@@ -705,31 +760,6 @@ fn answer_hypothetical_frontier_request(
 	let _ = tx.send(response);
 }
 
-fn fragment_tree_membership(
-	active_leaves: &HashMap<Hash, RelayBlockViewData>,
-	para: ParaId,
-	candidate: CandidateHash,
-) -> FragmentTreeMembership {
-	let mut membership = Vec::new();
-	for (relay_parent, view_data) in active_leaves {
-		if let Some(tree) = view_data.fragment_trees.get(&para) {
-			if let Some(depths) = tree.candidate(&candidate) {
-				membership.push((*relay_parent, depths));
-			}
-		}
-	}
-	membership
-}
-
-fn answer_tree_membership_request(
-	view: &View,
-	para: ParaId,
-	candidate: CandidateHash,
-	tx: oneshot::Sender<FragmentTreeMembership>,
-) {
-	let _ = tx.send(fragment_tree_membership(&view.active_leaves, para, candidate));
-}
-
 fn answer_minimum_relay_parents_request(
 	view: &View,
 	relay_parent: Hash,
@@ -737,8 +767,8 @@ fn answer_minimum_relay_parents_request(
 ) {
 	let mut v = Vec::new();
 	if let Some(leaf_data) = view.active_leaves.get(&relay_parent) {
-		for (para_id, fragment_tree) in &leaf_data.fragment_trees {
-			v.push((*para_id, fragment_tree.scope().earliest_relay_parent().number));
+		for (para_id, fragment_chain) in &leaf_data.fragment_chains {
+			v.push((*para_id, fragment_chain.scope().earliest_relay_parent().number));
 		}
 	}
 
@@ -752,9 +782,9 @@ fn answer_prospective_validation_data_request(
 ) {
 	// 1. Try to get the head-data from the candidate store if known.
 	// 2. Otherwise, it might exist as the base in some relay-parent and we can find it by iterating
-	//    fragment trees.
+	//    fragment chains.
 	// 3. Otherwise, it is unknown.
-	// 4. Also try to find the relay parent block info by scanning fragment trees.
+	// 4. Also try to find the relay parent block info by scanning fragment chains.
 	// 5. If head data and relay parent block info are found - success. Otherwise, failure.
 
 	let storage = match view.candidate_storage.get(&request.para_id) {
@@ -776,35 +806,32 @@ fn answer_prospective_validation_data_request(
 	let mut relay_parent_info = None;
 	let mut max_pov_size = None;
 
-	for fragment_tree in view
+	for fragment_chain in view
 		.active_leaves
 		.values()
-		.filter_map(|x| x.fragment_trees.get(&request.para_id))
+		.filter_map(|x| x.fragment_chains.get(&request.para_id))
 	{
 		if head_data.is_some() && relay_parent_info.is_some() && max_pov_size.is_some() {
 			break
 		}
 		if relay_parent_info.is_none() {
-			relay_parent_info =
-				fragment_tree.scope().ancestor_by_hash(&request.candidate_relay_parent);
+			relay_parent_info = fragment_chain.scope().ancestor(&request.candidate_relay_parent);
 		}
 		if head_data.is_none() {
-			let required_parent = &fragment_tree.scope().base_constraints().required_parent;
+			let required_parent = &fragment_chain.scope().base_constraints().required_parent;
 			if required_parent.hash() == parent_head_data_hash {
 				head_data = Some(required_parent.clone());
 			}
 		}
 		if max_pov_size.is_none() {
-			let contains_ancestor = fragment_tree
-				.scope()
-				.ancestor_by_hash(&request.candidate_relay_parent)
-				.is_some();
+			let contains_ancestor =
+				fragment_chain.scope().ancestor(&request.candidate_relay_parent).is_some();
 			if contains_ancestor {
 				// We are leaning hard on two assumptions here.
-				// 1. That the fragment tree never contains allowed relay-parents whose session for
+				// 1. That the fragment chain never contains allowed relay-parents whose session for
 				//    children is different from that of the base block's.
 				// 2. That the max_pov_size is only configurable per session.
-				max_pov_size = Some(fragment_tree.scope().base_constraints().max_pov_size);
+				max_pov_size = Some(fragment_chain.scope().base_constraints().max_pov_size);
 			}
 		}
 	}
@@ -843,7 +870,6 @@ async fn fetch_backing_state<Context>(
 async fn fetch_upcoming_paras<Context>(
 	ctx: &mut Context,
 	relay_parent: Hash,
-	pending_availability: &mut HashSet<CandidateHash>,
 ) -> JfyiErrorResult<Vec<ParaId>> {
 	let (tx, rx) = oneshot::channel();
 
@@ -860,8 +886,6 @@ async fn fetch_upcoming_paras<Context>(
 	for core in cores {
 		match core {
 			CoreState::Occupied(occupied) => {
-				pending_availability.insert(occupied.candidate_hash);
-
 				if let Some(next_up_on_available) = occupied.next_up_on_available {
 					upcoming.insert(next_up_on_available.para_id);
 				}
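
To summarize the per-leaf decision made in `handle_introduce_seconded_candidate` above: the candidate is first given a chance to connect to the chain, and is otherwise only kept if the chain still considers it a potential (unconnected) candidate. A condensed sketch (not part of the patch), assuming the types from this file; the wrapper function is hypothetical, while the method names and the `PotentialAddition::Anyhow` check come from the hunk above:

	fn keep_candidate_for_leaf(
		chain: &mut FragmentChain,
		storage: &CandidateStorage,
		candidate_hash: &CandidateHash,
		candidate_relay_parent: &Hash,
		parent_head_hash: Hash,
		output_head_hash: Option<Hash>,
	) -> bool {
		// Try to connect the candidate (and anything else in storage) to the chain.
		chain.extend_from_storage(storage);
		if chain.contains_candidate(candidate_hash) {
			return true
		}
		// Not connected: keep it only if it remains a potential candidate for this chain.
		matches!(
			chain.can_add_candidate_as_potential(
				storage,
				candidate_hash,
				candidate_relay_parent,
				parent_head_hash,
				output_head_hash,
			),
			PotentialAddition::Anyhow
		)
	}

If this evaluates to false for every active leaf, the candidate is removed from storage again, which is exactly the `keep_in_storage` bookkeeping in the function above.
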
diff --git a/polkadot/node/core/prospective-parachains/src/metrics.rs b/polkadot/node/core/prospective-parachains/src/metrics.rs
index 57061497a1c0d2923dedeea341a4cf4cf2ed8807..5abd9f56f306cdad515b93c794a51de516417b88 100644
--- a/polkadot/node/core/prospective-parachains/src/metrics.rs
+++ b/polkadot/node/core/prospective-parachains/src/metrics.rs
@@ -14,11 +14,18 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
-use polkadot_node_subsystem_util::metrics::{self, prometheus};
+use polkadot_node_subsystem::prometheus::Opts;
+use polkadot_node_subsystem_util::metrics::{
+	self,
+	prometheus::{self, GaugeVec, U64},
+};
 
 #[derive(Clone)]
 pub(crate) struct MetricsInner {
-	pub(crate) prune_view_candidate_storage: prometheus::Histogram,
+	prune_view_candidate_storage: prometheus::Histogram,
+	introduce_seconded_candidate: prometheus::Histogram,
+	hypothetical_membership: prometheus::Histogram,
+	candidate_storage_count: prometheus::GaugeVec<U64>,
 }
 
 /// Candidate backing metrics.
@@ -34,6 +41,40 @@ impl Metrics {
 			.as_ref()
 			.map(|metrics| metrics.prune_view_candidate_storage.start_timer())
 	}
+
+	/// Provide a timer for handling `IntroduceSecondedCandidate` which observes on drop.
+	pub fn time_introduce_seconded_candidate(
+		&self,
+	) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
+		self.0
+			.as_ref()
+			.map(|metrics| metrics.introduce_seconded_candidate.start_timer())
+	}
+
+	/// Provide a timer for handling `GetHypotheticalMembership` which observes on drop.
+	pub fn time_hypothetical_membership_request(
+		&self,
+	) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
+		self.0.as_ref().map(|metrics| metrics.hypothetical_membership.start_timer())
+	}
+
+	/// Record the size of the candidate storage. First param is the connected candidates count,
+	/// second param is the unconnected candidates count.
+	pub fn record_candidate_storage_size(&self, connected_count: u64, unconnected_count: u64) {
+		self.0.as_ref().map(|metrics| {
+			metrics
+				.candidate_storage_count
+				.with_label_values(&["connected"])
+				.set(connected_count)
+		});
+
+		self.0.as_ref().map(|metrics| {
+			metrics
+				.candidate_storage_count
+				.with_label_values(&["unconnected"])
+				.set(unconnected_count)
+		});
+	}
 }
 
 impl metrics::Metrics for Metrics {
@@ -46,6 +87,30 @@ impl metrics::Metrics for Metrics {
 				))?,
 				registry,
 			)?,
+			introduce_seconded_candidate: prometheus::register(
+				prometheus::Histogram::with_opts(prometheus::HistogramOpts::new(
+					"polkadot_parachain_prospective_parachains_introduce_seconded_candidate",
+					"Time spent within `prospective_parachains::handle_introduce_seconded_candidate`",
+				))?,
+				registry,
+			)?,
+			hypothetical_membership: prometheus::register(
+				prometheus::Histogram::with_opts(prometheus::HistogramOpts::new(
+					"polkadot_parachain_prospective_parachains_hypothetical_membership",
+					"Time spent responding to `GetHypotheticalMembership`",
+				))?,
+				registry,
+			)?,
+			candidate_storage_count: prometheus::register(
+				GaugeVec::new(
+					Opts::new(
+						"polkadot_parachain_prospective_parachains_candidate_storage_count",
+						"Number of candidates present in the candidate storage, split by connected and unconnected"
+					),
+					&["type"],
+				)?,
+				registry,
+			)?,
 		};
 		Ok(Metrics(Some(metrics)))
 	}
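
The storage-size metric above is a single gauge vector keyed by a "type" label with two series, "connected" (candidates in a fragment chain, including those pending availability) and "unconnected" (potential candidates kept only in storage), with the counts computed by `prune_view_candidate_storage`. A minimal usage sketch; the wrapper function is hypothetical, only `record_candidate_storage_size` and its parameter order come from this file:

	fn report_candidate_storage_size(metrics: &Metrics, connected: u64, unconnected: u64) {
		// First parameter: connected candidates; second: unconnected candidates.
		metrics.record_candidate_storage_size(connected, unconnected);
	}
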
diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs
index 8989911a33239d3b6f775bcf6271c6f40d9a0bac..4bc47367278864e6f5e7136fe07a4fe1d1be88a5 100644
--- a/polkadot/node/core/prospective-parachains/src/tests.rs
+++ b/polkadot/node/core/prospective-parachains/src/tests.rs
@@ -19,7 +19,7 @@ use assert_matches::assert_matches;
 use polkadot_node_subsystem::{
 	errors::RuntimeApiError,
 	messages::{
-		AllMessages, HypotheticalFrontierRequest, ParentHeadData, ProspectiveParachainsMessage,
+		AllMessages, HypotheticalMembershipRequest, ParentHeadData, ProspectiveParachainsMessage,
 		ProspectiveValidationDataRequest,
 	},
 };
@@ -340,36 +340,42 @@ async fn deactivate_leaf(virtual_overseer: &mut VirtualOverseer, hash: Hash) {
 		.await;
 }
 
-async fn introduce_candidate(
+async fn introduce_seconded_candidate(
 	virtual_overseer: &mut VirtualOverseer,
 	candidate: CommittedCandidateReceipt,
 	pvd: PersistedValidationData,
 ) {
-	let req = IntroduceCandidateRequest {
+	let req = IntroduceSecondedCandidateRequest {
 		candidate_para: candidate.descriptor().para_id,
 		candidate_receipt: candidate,
 		persisted_validation_data: pvd,
 	};
-	let (tx, _) = oneshot::channel();
+	let (tx, rx) = oneshot::channel();
 	virtual_overseer
 		.send(overseer::FromOrchestra::Communication {
-			msg: ProspectiveParachainsMessage::IntroduceCandidate(req, tx),
+			msg: ProspectiveParachainsMessage::IntroduceSecondedCandidate(req, tx),
 		})
 		.await;
+	assert!(rx.await.unwrap());
 }
 
-async fn second_candidate(
+async fn introduce_seconded_candidate_failed(
 	virtual_overseer: &mut VirtualOverseer,
 	candidate: CommittedCandidateReceipt,
+	pvd: PersistedValidationData,
 ) {
+	let req = IntroduceSecondedCandidateRequest {
+		candidate_para: candidate.descriptor().para_id,
+		candidate_receipt: candidate,
+		persisted_validation_data: pvd,
+	};
+	let (tx, rx) = oneshot::channel();
 	virtual_overseer
 		.send(overseer::FromOrchestra::Communication {
-			msg: ProspectiveParachainsMessage::CandidateSeconded(
-				candidate.descriptor.para_id,
-				candidate.hash(),
-			),
+			msg: ProspectiveParachainsMessage::IntroduceSecondedCandidate(req, tx),
 		})
 		.await;
+	assert!(!rx.await.unwrap());
 }
 
 async fn back_candidate(
@@ -387,22 +393,6 @@ async fn back_candidate(
 		.await;
 }
 
-async fn get_membership(
-	virtual_overseer: &mut VirtualOverseer,
-	para_id: ParaId,
-	candidate_hash: CandidateHash,
-	expected_membership_response: Vec<(Hash, Vec<usize>)>,
-) {
-	let (tx, rx) = oneshot::channel();
-	virtual_overseer
-		.send(overseer::FromOrchestra::Communication {
-			msg: ProspectiveParachainsMessage::GetTreeMembership(para_id, candidate_hash, tx),
-		})
-		.await;
-	let resp = rx.await.unwrap();
-	assert_eq!(resp, expected_membership_response);
-}
-
 async fn get_backable_candidates(
 	virtual_overseer: &mut VirtualOverseer,
 	leaf: &TestLeaf,
@@ -420,42 +410,39 @@ async fn get_backable_candidates(
 		})
 		.await;
 	let resp = rx.await.unwrap();
-	assert_eq!(resp.len(), expected_result.len());
 	assert_eq!(resp, expected_result);
 }
 
-async fn get_hypothetical_frontier(
+async fn get_hypothetical_membership(
 	virtual_overseer: &mut VirtualOverseer,
 	candidate_hash: CandidateHash,
 	receipt: CommittedCandidateReceipt,
 	persisted_validation_data: PersistedValidationData,
-	fragment_tree_relay_parent: Hash,
-	backed_in_path_only: bool,
-	expected_depths: Vec<usize>,
+	expected_membership: Vec<Hash>,
 ) {
 	let hypothetical_candidate = HypotheticalCandidate::Complete {
 		candidate_hash,
 		receipt: Arc::new(receipt),
 		persisted_validation_data,
 	};
-	let request = HypotheticalFrontierRequest {
+	let request = HypotheticalMembershipRequest {
 		candidates: vec![hypothetical_candidate.clone()],
-		fragment_tree_relay_parent: Some(fragment_tree_relay_parent),
-		backed_in_path_only,
+		fragment_chain_relay_parent: None,
 	};
 	let (tx, rx) = oneshot::channel();
 	virtual_overseer
 		.send(overseer::FromOrchestra::Communication {
-			msg: ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx),
+			msg: ProspectiveParachainsMessage::GetHypotheticalMembership(request, tx),
 		})
 		.await;
-	let resp = rx.await.unwrap();
-	let expected_frontier = if expected_depths.is_empty() {
-		vec![(hypothetical_candidate, vec![])]
-	} else {
-		vec![(hypothetical_candidate, vec![(fragment_tree_relay_parent, expected_depths)])]
-	};
-	assert_eq!(resp, expected_frontier);
+	let mut resp = rx.await.unwrap();
+	assert_eq!(resp.len(), 1);
+	let (candidate, membership) = resp.remove(0);
+	assert_eq!(candidate, hypothetical_candidate);
+	assert_eq!(
+		membership.into_iter().collect::<HashSet<_>>(),
+		expected_membership.into_iter().collect::<HashSet<_>>()
+	);
 }
 
 async fn get_pvd(
@@ -513,11 +500,11 @@ fn should_do_no_work_if_async_backing_disabled_for_leaf() {
 }
 
 // Send some candidates and make sure all are found:
-// - Two for the same leaf A
+// - Two for the same leaf A (one for parachain 1 and one for parachain 2)
 // - One for leaf B on parachain 1
 // - One for leaf C on parachain 2
 #[test]
-fn send_candidates_and_check_if_found() {
+fn introduce_candidates_basic() {
 	let test_state = TestState::default();
 	let view = test_harness(|mut virtual_overseer| async move {
 		// Leaf A
@@ -563,7 +550,7 @@ fn send_candidates_and_check_if_found() {
 			test_state.validation_code_hash,
 		);
 		let candidate_hash_a1 = candidate_a1.hash();
-		let response_a1 = vec![(leaf_a.hash, vec![0])];
+		let response_a1 = vec![(candidate_hash_a1, leaf_a.hash)];
 
 		// Candidate A2
 		let (candidate_a2, pvd_a2) = make_candidate(
@@ -575,7 +562,7 @@ fn send_candidates_and_check_if_found() {
 			test_state.validation_code_hash,
 		);
 		let candidate_hash_a2 = candidate_a2.hash();
-		let response_a2 = vec![(leaf_a.hash, vec![0])];
+		let response_a2 = vec![(candidate_hash_a2, leaf_a.hash)];
 
 		// Candidate B
 		let (candidate_b, pvd_b) = make_candidate(
@@ -587,7 +574,7 @@ fn send_candidates_and_check_if_found() {
 			test_state.validation_code_hash,
 		);
 		let candidate_hash_b = candidate_b.hash();
-		let response_b = vec![(leaf_b.hash, vec![0])];
+		let response_b = vec![(candidate_hash_b, leaf_b.hash)];
 
 		// Candidate C
 		let (candidate_c, pvd_c) = make_candidate(
@@ -599,25 +586,78 @@ fn send_candidates_and_check_if_found() {
 			test_state.validation_code_hash,
 		);
 		let candidate_hash_c = candidate_c.hash();
-		let response_c = vec![(leaf_c.hash, vec![0])];
+		let response_c = vec![(candidate_hash_c, leaf_c.hash)];
 
 		// Introduce candidates.
-		introduce_candidate(&mut virtual_overseer, candidate_a1, pvd_a1).await;
-		introduce_candidate(&mut virtual_overseer, candidate_a2, pvd_a2).await;
-		introduce_candidate(&mut virtual_overseer, candidate_b, pvd_b).await;
-		introduce_candidate(&mut virtual_overseer, candidate_c, pvd_c).await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_a1.clone(), pvd_a1).await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_a2.clone(), pvd_a2).await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c).await;
+
+		// Back candidates. Otherwise, we cannot check membership with GetBackableCandidates.
+		back_candidate(&mut virtual_overseer, &candidate_a1, candidate_hash_a1).await;
+		back_candidate(&mut virtual_overseer, &candidate_a2, candidate_hash_a2).await;
+		back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await;
+		back_candidate(&mut virtual_overseer, &candidate_c, candidate_hash_c).await;
 
 		// Check candidate tree membership.
-		get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, response_a1).await;
-		get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a2, response_a2).await;
-		get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, response_b).await;
-		get_membership(&mut virtual_overseer, 2.into(), candidate_hash_c, response_c).await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			1.into(),
+			Ancestors::default(),
+			5,
+			response_a1,
+		)
+		.await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			2.into(),
+			Ancestors::default(),
+			5,
+			response_a2,
+		)
+		.await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_b,
+			1.into(),
+			Ancestors::default(),
+			5,
+			response_b,
+		)
+		.await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_c,
+			2.into(),
+			Ancestors::default(),
+			5,
+			response_c,
+		)
+		.await;
+
+		// Check membership on other leaves.
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_b,
+			2.into(),
+			Ancestors::default(),
+			5,
+			vec![],
+		)
+		.await;
 
-		// The candidates should not be found on other parachains.
-		get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a1, vec![]).await;
-		get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a2, vec![]).await;
-		get_membership(&mut virtual_overseer, 2.into(), candidate_hash_b, vec![]).await;
-		get_membership(&mut virtual_overseer, 1.into(), candidate_hash_c, vec![]).await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_c,
+			1.into(),
+			Ancestors::default(),
+			5,
+			vec![],
+		)
+		.await;
 
 		virtual_overseer
 	});
@@ -629,10 +669,8 @@ fn send_candidates_and_check_if_found() {
 	assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (2, 2));
 }
 
-// Send some candidates, check if the candidate won't be found once its relay parent leaves the
-// view.
 #[test]
-fn check_candidate_parent_leaving_view() {
+fn introduce_candidate_multiple_times() {
 	let test_state = TestState::default();
 	let view = test_harness(|mut virtual_overseer| async move {
 		// Leaf A
@@ -644,32 +682,11 @@ fn check_candidate_parent_leaving_view() {
 				(2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))),
 			],
 		};
-		// Leaf B
-		let leaf_b = TestLeaf {
-			number: 101,
-			hash: Hash::from_low_u64_be(131),
-			para_data: vec![
-				(1.into(), PerParaData::new(99, HeadData(vec![3, 4, 5]))),
-				(2.into(), PerParaData::new(101, HeadData(vec![4, 5, 6]))),
-			],
-		};
-		// Leaf C
-		let leaf_c = TestLeaf {
-			number: 102,
-			hash: Hash::from_low_u64_be(132),
-			para_data: vec![
-				(1.into(), PerParaData::new(102, HeadData(vec![5, 6, 7]))),
-				(2.into(), PerParaData::new(98, HeadData(vec![6, 7, 8]))),
-			],
-		};
-
 		// Activate leaves.
 		activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await;
-		activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await;
-		activate_leaf(&mut virtual_overseer, &leaf_c, &test_state).await;
 
-		// Candidate A1
-		let (candidate_a1, pvd_a1) = make_candidate(
+		// Candidate A.
+		let (candidate_a, pvd_a) = make_candidate(
 			leaf_a.hash,
 			leaf_a.number,
 			1.into(),
@@ -677,86 +694,45 @@ fn check_candidate_parent_leaving_view() {
 			HeadData(vec![1]),
 			test_state.validation_code_hash,
 		);
-		let candidate_hash_a1 = candidate_a1.hash();
-
-		// Candidate A2
-		let (candidate_a2, pvd_a2) = make_candidate(
-			leaf_a.hash,
-			leaf_a.number,
-			2.into(),
-			HeadData(vec![2, 3, 4]),
-			HeadData(vec![2]),
-			test_state.validation_code_hash,
-		);
-		let candidate_hash_a2 = candidate_a2.hash();
-
-		// Candidate B
-		let (candidate_b, pvd_b) = make_candidate(
-			leaf_b.hash,
-			leaf_b.number,
-			1.into(),
-			HeadData(vec![3, 4, 5]),
-			HeadData(vec![3]),
-			test_state.validation_code_hash,
-		);
-		let candidate_hash_b = candidate_b.hash();
-		let response_b = vec![(leaf_b.hash, vec![0])];
-
-		// Candidate C
-		let (candidate_c, pvd_c) = make_candidate(
-			leaf_c.hash,
-			leaf_c.number,
-			2.into(),
-			HeadData(vec![6, 7, 8]),
-			HeadData(vec![4]),
-			test_state.validation_code_hash,
-		);
-		let candidate_hash_c = candidate_c.hash();
-		let response_c = vec![(leaf_c.hash, vec![0])];
+		let candidate_hash_a = candidate_a.hash();
+		let response_a = vec![(candidate_hash_a, leaf_a.hash)];
 
 		// Introduce candidates.
-		introduce_candidate(&mut virtual_overseer, candidate_a1, pvd_a1).await;
-		introduce_candidate(&mut virtual_overseer, candidate_a2, pvd_a2).await;
-		introduce_candidate(&mut virtual_overseer, candidate_b, pvd_b).await;
-		introduce_candidate(&mut virtual_overseer, candidate_c, pvd_c).await;
-
-		// Deactivate leaf A.
-		deactivate_leaf(&mut virtual_overseer, leaf_a.hash).await;
-
-		// Candidates A1 and A2 should be gone. Candidates B and C should remain.
-		get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, vec![]).await;
-		get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a2, vec![]).await;
-		get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, response_b).await;
-		get_membership(&mut virtual_overseer, 2.into(), candidate_hash_c, response_c.clone()).await;
-
-		// Deactivate leaf B.
-		deactivate_leaf(&mut virtual_overseer, leaf_b.hash).await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone())
+			.await;
 
-		// Candidate B should be gone, C should remain.
-		get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, vec![]).await;
-		get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a2, vec![]).await;
-		get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, vec![]).await;
-		get_membership(&mut virtual_overseer, 2.into(), candidate_hash_c, response_c).await;
+		// Back candidates. Otherwise, we cannot check membership with GetBackableCandidates.
+		back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await;
 
-		// Deactivate leaf C.
-		deactivate_leaf(&mut virtual_overseer, leaf_c.hash).await;
+		// Check fragment chain membership via GetBackableCandidates.
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			1.into(),
+			Ancestors::default(),
+			5,
+			response_a,
+		)
+		.await;
 
-		// Candidate C should be gone.
-		get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, vec![]).await;
-		get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a2, vec![]).await;
-		get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, vec![]).await;
-		get_membership(&mut virtual_overseer, 2.into(), candidate_hash_c, vec![]).await;
+		// Introduce the same candidate multiple times. Each request succeeds, but the candidate
+		// is only stored once; we check below that the candidate count remains 1.
+		for _ in 0..5 {
+			introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone())
+				.await;
+		}
 
 		virtual_overseer
 	});
 
-	assert_eq!(view.active_leaves.len(), 0);
-	assert_eq!(view.candidate_storage.len(), 0);
+	assert_eq!(view.active_leaves.len(), 1);
+	assert_eq!(view.candidate_storage.len(), 2);
+	assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (1, 1));
+	assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0));
 }
 
-// Introduce a candidate to multiple forks, see how the membership is returned.
 #[test]
-fn check_candidate_on_multiple_forks() {
+fn fragment_chain_length_is_bounded() {
 	let test_state = TestState::default();
 	let view = test_harness(|mut virtual_overseer| async move {
 		// Leaf A
@@ -768,31 +744,16 @@ fn check_candidate_on_multiple_forks() {
 				(2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))),
 			],
 		};
-		// Leaf B
-		let leaf_b = TestLeaf {
-			number: 101,
-			hash: Hash::from_low_u64_be(131),
-			para_data: vec![
-				(1.into(), PerParaData::new(99, HeadData(vec![3, 4, 5]))),
-				(2.into(), PerParaData::new(101, HeadData(vec![4, 5, 6]))),
-			],
-		};
-		// Leaf C
-		let leaf_c = TestLeaf {
-			number: 102,
-			hash: Hash::from_low_u64_be(132),
-			para_data: vec![
-				(1.into(), PerParaData::new(102, HeadData(vec![5, 6, 7]))),
-				(2.into(), PerParaData::new(98, HeadData(vec![6, 7, 8]))),
-			],
-		};
-
 		// Activate leaves.
-		activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await;
-		activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await;
-		activate_leaf(&mut virtual_overseer, &leaf_c, &test_state).await;
+		activate_leaf_with_params(
+			&mut virtual_overseer,
+			&leaf_a,
+			&test_state,
+			AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 3 },
+		)
+		.await;
 
-		// Candidate on leaf A.
+		// Candidates A, B and C form a chain.
 		let (candidate_a, pvd_a) = make_candidate(
 			leaf_a.hash,
 			leaf_a.number,
@@ -801,56 +762,59 @@ fn check_candidate_on_multiple_forks() {
 			HeadData(vec![1]),
 			test_state.validation_code_hash,
 		);
-		let candidate_hash_a = candidate_a.hash();
-		let response_a = vec![(leaf_a.hash, vec![0])];
-
-		// Candidate on leaf B.
 		let (candidate_b, pvd_b) = make_candidate(
-			leaf_b.hash,
-			leaf_b.number,
+			leaf_a.hash,
+			leaf_a.number,
 			1.into(),
-			HeadData(vec![3, 4, 5]),
 			HeadData(vec![1]),
+			HeadData(vec![2]),
 			test_state.validation_code_hash,
 		);
-		let candidate_hash_b = candidate_b.hash();
-		let response_b = vec![(leaf_b.hash, vec![0])];
-
-		// Candidate on leaf C.
 		let (candidate_c, pvd_c) = make_candidate(
-			leaf_c.hash,
-			leaf_c.number,
+			leaf_a.hash,
+			leaf_a.number,
 			1.into(),
-			HeadData(vec![5, 6, 7]),
-			HeadData(vec![1]),
+			HeadData(vec![2]),
+			HeadData(vec![3]),
 			test_state.validation_code_hash,
 		);
-		let candidate_hash_c = candidate_c.hash();
-		let response_c = vec![(leaf_c.hash, vec![0])];
 
-		// Introduce candidates on all three leaves.
-		introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await;
-		introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await;
-		introduce_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c).await;
+		// Introduce candidates A and B. Since max depth is 1, only these two will be allowed.
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone())
+			.await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone())
+			.await;
+
+		// Back candidates. Otherwise, we cannot check membership with GetBackableCandidates.
+		back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await;
+		back_candidate(&mut virtual_overseer, &candidate_b, candidate_b.hash()).await;
 
 		// Check candidate tree membership.
-		get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a, response_a).await;
-		get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, response_b).await;
-		get_membership(&mut virtual_overseer, 1.into(), candidate_hash_c, response_c).await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			1.into(),
+			Ancestors::default(),
+			5,
+			vec![(candidate_a.hash(), leaf_a.hash), (candidate_b.hash(), leaf_a.hash)],
+		)
+		.await;
+
+		// Introducing C will fail.
+		introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_c, pvd_c.clone())
+			.await;
 
 		virtual_overseer
 	});
 
-	assert_eq!(view.active_leaves.len(), 3);
+	assert_eq!(view.active_leaves.len(), 1);
 	assert_eq!(view.candidate_storage.len(), 2);
-	// Three parents and three candidates on para 1.
-	assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (3, 3));
+	assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (2, 2));
 	assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0));
 }
 
-// Backs some candidates and tests `GetBackableCandidates` when requesting a single candidate.
 #[test]
-fn check_backable_query_single_candidate() {
+fn unconnected_candidate_count_is_bounded() {
 	let test_state = TestState::default();
 	let view = test_harness(|mut virtual_overseer| async move {
 		// Leaf A
@@ -862,54 +826,534 @@ fn check_backable_query_single_candidate() {
 				(2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))),
 			],
 		};
-
 		// Activate leaves.
-		activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await;
+		activate_leaf_with_params(
+			&mut virtual_overseer,
+			&leaf_a,
+			&test_state,
+			AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 3 },
+		)
+		.await;
 
-		// Candidate A
+		// Candidates A, B and C are all potential candidates but don't form a chain.
 		let (candidate_a, pvd_a) = make_candidate(
 			leaf_a.hash,
 			leaf_a.number,
 			1.into(),
-			HeadData(vec![1, 2, 3]),
 			HeadData(vec![1]),
+			HeadData(vec![2]),
 			test_state.validation_code_hash,
 		);
-		let candidate_hash_a = candidate_a.hash();
-
-		// Candidate B
-		let (mut candidate_b, pvd_b) = make_candidate(
+		let (candidate_b, pvd_b) = make_candidate(
 			leaf_a.hash,
 			leaf_a.number,
 			1.into(),
-			HeadData(vec![1]),
-			HeadData(vec![2]),
+			HeadData(vec![3]),
+			HeadData(vec![4]),
+			test_state.validation_code_hash,
+		);
+		let (candidate_c, pvd_c) = make_candidate(
+			leaf_a.hash,
+			leaf_a.number,
+			1.into(),
+			HeadData(vec![4]),
+			HeadData(vec![5]),
 			test_state.validation_code_hash,
 		);
-		// Set a field to make this candidate unique.
-		candidate_b.descriptor.para_head = Hash::from_low_u64_le(1000);
-		let candidate_hash_b = candidate_b.hash();
-
-		// Introduce candidates.
-		introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await;
-		introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await;
 
-		// Should not get any backable candidates.
-		get_backable_candidates(
+		// Introduce candidates A and B. Although max depth 1 would normally allow two candidates,
+		// only one is accepted, since the last candidate in the chain must be connected.
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone())
+			.await;
+		introduce_seconded_candidate_failed(
 			&mut virtual_overseer,
-			&leaf_a,
+			candidate_b.clone(),
+			pvd_b.clone(),
+		)
+		.await;
+
+		// Back candidates. Otherwise, we cannot check membership with GetBackableCandidates.
+		back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await;
+
+		// Check candidate tree membership. Should be empty.
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			1.into(),
+			Ancestors::default(),
+			5,
+			vec![],
+		)
+		.await;
+
+		// Introducing C will also fail.
+		introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_c, pvd_c.clone())
+			.await;
+
+		virtual_overseer
+	});
+
+	assert_eq!(view.active_leaves.len(), 1);
+	assert_eq!(view.candidate_storage.len(), 2);
+	assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (1, 1));
+	assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0));
+}
+
+// Send some candidates and check that they can no longer be found once their relay parent
+// leaves the view.
+#[test]
+fn introduce_candidate_parent_leaving_view() {
+	let test_state = TestState::default();
+	let view = test_harness(|mut virtual_overseer| async move {
+		// Leaf A
+		let leaf_a = TestLeaf {
+			number: 100,
+			hash: Hash::from_low_u64_be(130),
+			para_data: vec![
+				(1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))),
+				(2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))),
+			],
+		};
+		// Leaf B
+		let leaf_b = TestLeaf {
+			number: 101,
+			hash: Hash::from_low_u64_be(131),
+			para_data: vec![
+				(1.into(), PerParaData::new(99, HeadData(vec![3, 4, 5]))),
+				(2.into(), PerParaData::new(101, HeadData(vec![4, 5, 6]))),
+			],
+		};
+		// Leaf C
+		let leaf_c = TestLeaf {
+			number: 102,
+			hash: Hash::from_low_u64_be(132),
+			para_data: vec![
+				(1.into(), PerParaData::new(102, HeadData(vec![5, 6, 7]))),
+				(2.into(), PerParaData::new(98, HeadData(vec![6, 7, 8]))),
+			],
+		};
+
+		// Activate leaves.
+		activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await;
+		activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await;
+		activate_leaf(&mut virtual_overseer, &leaf_c, &test_state).await;
+
+		// Candidate A1
+		let (candidate_a1, pvd_a1) = make_candidate(
+			leaf_a.hash,
+			leaf_a.number,
+			1.into(),
+			HeadData(vec![1, 2, 3]),
+			HeadData(vec![1]),
+			test_state.validation_code_hash,
+		);
+		let candidate_hash_a1 = candidate_a1.hash();
+
+		// Candidate A2
+		let (candidate_a2, pvd_a2) = make_candidate(
+			leaf_a.hash,
+			leaf_a.number,
+			2.into(),
+			HeadData(vec![2, 3, 4]),
+			HeadData(vec![2]),
+			test_state.validation_code_hash,
+		);
+		let candidate_hash_a2 = candidate_a2.hash();
+
+		// Candidate B
+		let (candidate_b, pvd_b) = make_candidate(
+			leaf_b.hash,
+			leaf_b.number,
+			1.into(),
+			HeadData(vec![3, 4, 5]),
+			HeadData(vec![3]),
+			test_state.validation_code_hash,
+		);
+		let candidate_hash_b = candidate_b.hash();
+		let response_b = vec![(candidate_hash_b, leaf_b.hash)];
+
+		// Candidate C
+		let (candidate_c, pvd_c) = make_candidate(
+			leaf_c.hash,
+			leaf_c.number,
+			2.into(),
+			HeadData(vec![6, 7, 8]),
+			HeadData(vec![4]),
+			test_state.validation_code_hash,
+		);
+		let candidate_hash_c = candidate_c.hash();
+		let response_c = vec![(candidate_hash_c, leaf_c.hash)];
+
+		// Introduce candidates.
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_a1.clone(), pvd_a1).await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_a2.clone(), pvd_a2).await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c).await;
+
+		// Back candidates. Otherwise, we cannot check membership with GetBackableCandidates.
+		back_candidate(&mut virtual_overseer, &candidate_a1, candidate_hash_a1).await;
+		back_candidate(&mut virtual_overseer, &candidate_a2, candidate_hash_a2).await;
+		back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await;
+		back_candidate(&mut virtual_overseer, &candidate_c, candidate_hash_c).await;
+
+		// Deactivate leaf A.
+		deactivate_leaf(&mut virtual_overseer, leaf_a.hash).await;
+
+		// Candidates A1 and A2 should be gone. Candidates B and C should remain.
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			1.into(),
+			Ancestors::default(),
+			5,
+			vec![],
+		)
+		.await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			2.into(),
+			Ancestors::default(),
+			5,
+			vec![],
+		)
+		.await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_b,
+			1.into(),
+			Ancestors::default(),
+			5,
+			response_b,
+		)
+		.await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_c,
+			2.into(),
+			Ancestors::default(),
+			5,
+			response_c.clone(),
+		)
+		.await;
+
+		// Deactivate leaf B.
+		deactivate_leaf(&mut virtual_overseer, leaf_b.hash).await;
+
+		// Candidate B should be gone, C should remain.
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			1.into(),
+			Ancestors::default(),
+			5,
+			vec![],
+		)
+		.await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			2.into(),
+			Ancestors::default(),
+			5,
+			vec![],
+		)
+		.await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_b,
+			1.into(),
+			Ancestors::default(),
+			5,
+			vec![],
+		)
+		.await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_c,
+			2.into(),
+			Ancestors::default(),
+			5,
+			response_c,
+		)
+		.await;
+
+		// Deactivate leaf C.
+		deactivate_leaf(&mut virtual_overseer, leaf_c.hash).await;
+
+		// Candidate C should be gone.
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			1.into(),
+			Ancestors::default(),
+			5,
+			vec![],
+		)
+		.await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			2.into(),
+			Ancestors::default(),
+			5,
+			vec![],
+		)
+		.await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_b,
+			1.into(),
+			Ancestors::default(),
+			5,
+			vec![],
+		)
+		.await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_c,
+			2.into(),
+			Ancestors::default(),
+			5,
+			vec![],
+		)
+		.await;
+
+		virtual_overseer
+	});
+
+	assert_eq!(view.active_leaves.len(), 0);
+	assert_eq!(view.candidate_storage.len(), 0);
+}
+
+// Introduce a candidate on multiple forks and check how its membership is returned.
+#[test]
+fn introduce_candidate_on_multiple_forks() {
+	let test_state = TestState::default();
+	let view = test_harness(|mut virtual_overseer| async move {
+		// Leaf B
+		let leaf_b = TestLeaf {
+			number: 101,
+			hash: Hash::from_low_u64_be(131),
+			para_data: vec![
+				(1.into(), PerParaData::new(99, HeadData(vec![1, 2, 3]))),
+				(2.into(), PerParaData::new(101, HeadData(vec![4, 5, 6]))),
+			],
+		};
+		// Leaf A
+		let leaf_a = TestLeaf {
+			number: 100,
+			hash: get_parent_hash(leaf_b.hash),
+			para_data: vec![
+				(1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))),
+				(2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))),
+			],
+		};
+
+		// Activate leaves.
+		activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await;
+		activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await;
+
+		// Candidate built on leaf A.
+		let (candidate_a, pvd_a) = make_candidate(
+			leaf_a.hash,
+			leaf_a.number,
+			1.into(),
+			HeadData(vec![1, 2, 3]),
+			HeadData(vec![1]),
+			test_state.validation_code_hash,
+		);
+		let candidate_hash_a = candidate_a.hash();
+		let response_a = vec![(candidate_hash_a, leaf_a.hash)];
+
+		// Introduce the candidate. It should be present on both leaves A and B.
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await;
+		back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await;
+
+		// Check candidate tree membership.
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			1.into(),
+			Ancestors::default(),
+			5,
+			response_a.clone(),
+		)
+		.await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_b,
+			1.into(),
+			Ancestors::default(),
+			5,
+			response_a.clone(),
+		)
+		.await;
+
+		virtual_overseer
+	});
+
+	assert_eq!(view.active_leaves.len(), 2);
+	assert_eq!(view.candidate_storage.len(), 2);
+	assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (1, 1));
+	assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0));
+}
+
+#[test]
+fn unconnected_candidates_become_connected() {
+	let test_state = TestState::default();
+	let view = test_harness(|mut virtual_overseer| async move {
+		// Leaf A
+		let leaf_a = TestLeaf {
+			number: 100,
+			hash: Hash::from_low_u64_be(130),
+			para_data: vec![
+				(1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))),
+				(2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))),
+			],
+		};
+		// Activate leaves.
+		activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await;
+
+		// Candidates A, B, C and D all form a chain, but we'll first introduce A, C and D.
+		let (candidate_a, pvd_a) = make_candidate(
+			leaf_a.hash,
+			leaf_a.number,
 			1.into(),
-			vec![candidate_hash_a].into_iter().collect(),
-			1,
-			vec![],
+			HeadData(vec![1, 2, 3]),
+			HeadData(vec![1]),
+			test_state.validation_code_hash,
+		);
+		let (candidate_b, pvd_b) = make_candidate(
+			leaf_a.hash,
+			leaf_a.number,
+			1.into(),
+			HeadData(vec![1]),
+			HeadData(vec![2]),
+			test_state.validation_code_hash,
+		);
+		let (candidate_c, pvd_c) = make_candidate(
+			leaf_a.hash,
+			leaf_a.number,
+			1.into(),
+			HeadData(vec![2]),
+			HeadData(vec![3]),
+			test_state.validation_code_hash,
+		);
+		let (candidate_d, pvd_d) = make_candidate(
+			leaf_a.hash,
+			leaf_a.number,
+			1.into(),
+			HeadData(vec![3]),
+			HeadData(vec![4]),
+			test_state.validation_code_hash,
+		);
+
+		// Introduce candidates A, C and D.
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone())
+			.await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c.clone())
+			.await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_d.clone(), pvd_d.clone())
+			.await;
+
+		// Back candidates. Otherwise, we cannot check membership with GetBackableCandidates.
+		back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await;
+		back_candidate(&mut virtual_overseer, &candidate_c, candidate_c.hash()).await;
+		back_candidate(&mut virtual_overseer, &candidate_d, candidate_d.hash()).await;
+
+		// Check candidate tree membership. Only A should be returned.
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			1.into(),
+			Ancestors::default(),
+			5,
+			vec![(candidate_a.hash(), leaf_a.hash)],
+		)
+		.await;
+
+		// Introduce B and check membership. The full chain should now be returned.
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone())
+			.await;
+		back_candidate(&mut virtual_overseer, &candidate_b, candidate_b.hash()).await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			1.into(),
+			Ancestors::default(),
+			5,
+			vec![
+				(candidate_a.hash(), leaf_a.hash),
+				(candidate_b.hash(), leaf_a.hash),
+				(candidate_c.hash(), leaf_a.hash),
+				(candidate_d.hash(), leaf_a.hash),
+			],
 		)
 		.await;
+
+		virtual_overseer
+	});
+
+	assert_eq!(view.active_leaves.len(), 1);
+	assert_eq!(view.candidate_storage.len(), 2);
+	assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (4, 4));
+	assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0));
+}
+
+// Backs some candidates and tests `GetBackableCandidates` when requesting a single candidate.
+#[test]
+fn check_backable_query_single_candidate() {
+	let test_state = TestState::default();
+	let view = test_harness(|mut virtual_overseer| async move {
+		// Leaf A
+		let leaf_a = TestLeaf {
+			number: 100,
+			hash: Hash::from_low_u64_be(130),
+			para_data: vec![
+				(1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))),
+				(2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))),
+			],
+		};
+
+		// Activate leaves.
+		activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await;
+
+		// Candidate A
+		let (candidate_a, pvd_a) = make_candidate(
+			leaf_a.hash,
+			leaf_a.number,
+			1.into(),
+			HeadData(vec![1, 2, 3]),
+			HeadData(vec![1]),
+			test_state.validation_code_hash,
+		);
+		let candidate_hash_a = candidate_a.hash();
+
+		// Candidate B
+		let (mut candidate_b, pvd_b) = make_candidate(
+			leaf_a.hash,
+			leaf_a.number,
+			1.into(),
+			HeadData(vec![1]),
+			HeadData(vec![2]),
+			test_state.validation_code_hash,
+		);
+		// Set a field to make this candidate unique.
+		candidate_b.descriptor.para_head = Hash::from_low_u64_le(1000);
+		let candidate_hash_b = candidate_b.hash();
+
+		// Introduce candidates.
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await;
+
+		// Should not get any backable candidates.
 		get_backable_candidates(
 			&mut virtual_overseer,
 			&leaf_a,
 			1.into(),
 			vec![candidate_hash_a].into_iter().collect(),
-			0,
+			1,
 			vec![],
 		)
 		.await;
@@ -917,23 +1361,17 @@ fn check_backable_query_single_candidate() {
 			&mut virtual_overseer,
 			&leaf_a,
 			1.into(),
-			Ancestors::new(),
+			vec![candidate_hash_a].into_iter().collect(),
 			0,
 			vec![],
 		)
 		.await;
-
-		// Second candidates.
-		second_candidate(&mut virtual_overseer, candidate_a.clone()).await;
-		second_candidate(&mut virtual_overseer, candidate_b.clone()).await;
-
-		// Should not get any backable candidates.
 		get_backable_candidates(
 			&mut virtual_overseer,
 			&leaf_a,
 			1.into(),
-			vec![candidate_hash_a].into_iter().collect(),
-			1,
+			Ancestors::new(),
+			0,
 			vec![],
 		)
 		.await;
@@ -1019,392 +1457,327 @@ fn check_backable_query_multiple_candidates() {
 			// Set a field to make this candidate unique.
 			candidate.descriptor.para_head = Hash::from_low_u64_le($index);
 			let candidate_hash = candidate.hash();
-			introduce_candidate(&mut $virtual_overseer, candidate.clone(), pvd).await;
-			second_candidate(&mut $virtual_overseer, candidate.clone()).await;
+			introduce_seconded_candidate(&mut $virtual_overseer, candidate.clone(), pvd).await;
 			back_candidate(&mut $virtual_overseer, &candidate, candidate_hash).await;
 
 			(candidate, candidate_hash)
 		}};
 	}
 
-	// Parachain 1 looks like this:
-	//          +---A----+
-	//          |        |
-	//     +----B---+    C
-	//     |    |   |    |
-	//     D    E   F    H
-	//              |    |
-	//              G    I
-	//                   |
-	//                   J
-	{
-		let test_state = TestState::default();
-		let view = test_harness(|mut virtual_overseer| async move {
-			// Leaf A
-			let leaf_a = TestLeaf {
-				number: 100,
-				hash: Hash::from_low_u64_be(130),
-				para_data: vec![
-					(1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))),
-					(2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))),
-				],
-			};
-
-			// Activate leaves.
-			activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await;
-
-			// Candidate A
-			let (candidate_a, pvd_a) = make_candidate(
-				leaf_a.hash,
-				leaf_a.number,
-				1.into(),
-				HeadData(vec![1, 2, 3]),
-				HeadData(vec![1]),
-				test_state.validation_code_hash,
-			);
-			let candidate_hash_a = candidate_a.hash();
-			introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await;
-			second_candidate(&mut virtual_overseer, candidate_a.clone()).await;
-			back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await;
-
-			let (candidate_b, candidate_hash_b) =
-				make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_a, 2);
-			let (candidate_c, candidate_hash_c) =
-				make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_a, 3);
-			let (_candidate_d, candidate_hash_d) =
-				make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 4);
-			let (_candidate_e, candidate_hash_e) =
-				make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 5);
-			let (candidate_f, candidate_hash_f) =
-				make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 6);
-			let (_candidate_g, candidate_hash_g) =
-				make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_f, 7);
-			let (candidate_h, candidate_hash_h) =
-				make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_c, 8);
-			let (candidate_i, candidate_hash_i) =
-				make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_h, 9);
-			let (_candidate_j, candidate_hash_j) =
-				make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_i, 10);
-
-			// Should not get any backable candidates for the other para.
-			get_backable_candidates(
-				&mut virtual_overseer,
-				&leaf_a,
-				2.into(),
-				Ancestors::new(),
-				1,
-				vec![],
-			)
-			.await;
-			get_backable_candidates(
-				&mut virtual_overseer,
-				&leaf_a,
-				2.into(),
-				Ancestors::new(),
-				5,
-				vec![],
-			)
-			.await;
-			get_backable_candidates(
-				&mut virtual_overseer,
-				&leaf_a,
-				2.into(),
-				vec![candidate_hash_a].into_iter().collect(),
-				1,
-				vec![],
-			)
-			.await;
-
-			// Test various scenarios with various counts.
-
-			// empty required_path
-			{
-				get_backable_candidates(
-					&mut virtual_overseer,
-					&leaf_a,
-					1.into(),
-					Ancestors::new(),
-					1,
-					vec![(candidate_hash_a, leaf_a.hash)],
-				)
-				.await;
-				get_backable_candidates(
-					&mut virtual_overseer,
-					&leaf_a,
-					1.into(),
-					Ancestors::new(),
-					4,
-					vec![
-						(candidate_hash_a, leaf_a.hash),
-						(candidate_hash_b, leaf_a.hash),
-						(candidate_hash_f, leaf_a.hash),
-						(candidate_hash_g, leaf_a.hash),
-					],
-				)
-				.await;
-			}
-
-			// required path of 1
-			{
-				get_backable_candidates(
-					&mut virtual_overseer,
-					&leaf_a,
-					1.into(),
-					vec![candidate_hash_a].into_iter().collect(),
-					1,
-					vec![(candidate_hash_b, leaf_a.hash)],
-				)
-				.await;
-				get_backable_candidates(
-					&mut virtual_overseer,
-					&leaf_a,
-					1.into(),
-					vec![candidate_hash_a].into_iter().collect(),
-					3,
-					vec![
-						(candidate_hash_b, leaf_a.hash),
-						(candidate_hash_f, leaf_a.hash),
-						(candidate_hash_g, leaf_a.hash),
-					],
-				)
-				.await;
+	let test_state = TestState::default();
+	let view = test_harness(|mut virtual_overseer| async move {
+		// Leaf A
+		let leaf_a = TestLeaf {
+			number: 100,
+			hash: Hash::from_low_u64_be(130),
+			para_data: vec![
+				(1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))),
+				(2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))),
+			],
+		};
 
-				// If the requested count exceeds the largest chain, return the longest
-				// chain we can get.
-				for count in 5..10 {
-					get_backable_candidates(
-						&mut virtual_overseer,
-						&leaf_a,
-						1.into(),
-						vec![candidate_hash_a].into_iter().collect(),
-						count,
-						vec![
-							(candidate_hash_c, leaf_a.hash),
-							(candidate_hash_h, leaf_a.hash),
-							(candidate_hash_i, leaf_a.hash),
-							(candidate_hash_j, leaf_a.hash),
-						],
-					)
-					.await;
-				}
-			}
+		// Activate leaves.
+		activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await;
 
-			// required path of 2 and higher
-			{
-				get_backable_candidates(
-					&mut virtual_overseer,
-					&leaf_a,
-					1.into(),
-					vec![candidate_hash_a, candidate_hash_i, candidate_hash_h, candidate_hash_c]
-						.into_iter()
-						.collect(),
-					1,
-					vec![(candidate_hash_j, leaf_a.hash)],
-				)
-				.await;
+		// Candidate A
+		let (candidate_a, pvd_a) = make_candidate(
+			leaf_a.hash,
+			leaf_a.number,
+			1.into(),
+			HeadData(vec![1, 2, 3]),
+			HeadData(vec![1]),
+			test_state.validation_code_hash,
+		);
+		let candidate_hash_a = candidate_a.hash();
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await;
+		back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await;
 
-				get_backable_candidates(
-					&mut virtual_overseer,
-					&leaf_a,
-					1.into(),
-					vec![candidate_hash_a, candidate_hash_b].into_iter().collect(),
-					1,
-					vec![(candidate_hash_d, leaf_a.hash)],
-				)
-				.await;
+		let (candidate_b, candidate_hash_b) =
+			make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_a, 2);
+		let (candidate_c, candidate_hash_c) =
+			make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 3);
+		let (_candidate_d, candidate_hash_d) =
+			make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_c, 4);
 
-				// If the requested count exceeds the largest chain, return the longest
-				// chain we can get.
-				for count in 4..10 {
-					get_backable_candidates(
-						&mut virtual_overseer,
-						&leaf_a,
-						1.into(),
-						vec![candidate_hash_a, candidate_hash_c].into_iter().collect(),
-						count,
-						vec![
-							(candidate_hash_h, leaf_a.hash),
-							(candidate_hash_i, leaf_a.hash),
-							(candidate_hash_j, leaf_a.hash),
-						],
-					)
-					.await;
-				}
-			}
+		// Should not get any backable candidates for the other para.
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			2.into(),
+			Ancestors::new(),
+			1,
+			vec![],
+		)
+		.await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			2.into(),
+			Ancestors::new(),
+			5,
+			vec![],
+		)
+		.await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			2.into(),
+			vec![candidate_hash_a].into_iter().collect(),
+			1,
+			vec![],
+		)
+		.await;
 
-			// No more candidates in any chain.
-			{
-				for count in 1..4 {
-					get_backable_candidates(
-						&mut virtual_overseer,
-						&leaf_a,
-						1.into(),
-						vec![candidate_hash_a, candidate_hash_b, candidate_hash_e]
-							.into_iter()
-							.collect(),
-						count,
-						vec![],
-					)
-					.await;
-
-					get_backable_candidates(
-						&mut virtual_overseer,
-						&leaf_a,
-						1.into(),
-						vec![
-							candidate_hash_a,
-							candidate_hash_c,
-							candidate_hash_h,
-							candidate_hash_i,
-							candidate_hash_j,
-						]
-						.into_iter()
-						.collect(),
-						count,
-						vec![],
-					)
-					.await;
-				}
-			}
+		// Test various scenarios with various counts.
 
-			// Wrong paths.
+		// empty ancestors
+		{
 			get_backable_candidates(
 				&mut virtual_overseer,
 				&leaf_a,
 				1.into(),
-				vec![candidate_hash_b].into_iter().collect(),
+				Ancestors::new(),
 				1,
 				vec![(candidate_hash_a, leaf_a.hash)],
 			)
 			.await;
+			for count in 4..10 {
+				get_backable_candidates(
+					&mut virtual_overseer,
+					&leaf_a,
+					1.into(),
+					Ancestors::new(),
+					count,
+					vec![
+						(candidate_hash_a, leaf_a.hash),
+						(candidate_hash_b, leaf_a.hash),
+						(candidate_hash_c, leaf_a.hash),
+						(candidate_hash_d, leaf_a.hash),
+					],
+				)
+				.await;
+			}
+		}
+
+		// ancestors of size 1
+		{
 			get_backable_candidates(
 				&mut virtual_overseer,
 				&leaf_a,
 				1.into(),
-				vec![candidate_hash_b, candidate_hash_f].into_iter().collect(),
-				3,
-				vec![
-					(candidate_hash_a, leaf_a.hash),
-					(candidate_hash_b, leaf_a.hash),
-					(candidate_hash_d, leaf_a.hash),
-				],
-			)
-			.await;
-			get_backable_candidates(
-				&mut virtual_overseer,
-				&leaf_a,
-				1.into(),
-				vec![candidate_hash_a, candidate_hash_h].into_iter().collect(),
-				4,
-				vec![
-					(candidate_hash_c, leaf_a.hash),
-					(candidate_hash_h, leaf_a.hash),
-					(candidate_hash_i, leaf_a.hash),
-					(candidate_hash_j, leaf_a.hash),
-				],
+				vec![candidate_hash_a].into_iter().collect(),
+				1,
+				vec![(candidate_hash_b, leaf_a.hash)],
 			)
 			.await;
 			get_backable_candidates(
 				&mut virtual_overseer,
 				&leaf_a,
 				1.into(),
-				vec![candidate_hash_e, candidate_hash_h].into_iter().collect(),
+				vec![candidate_hash_a].into_iter().collect(),
 				2,
-				vec![(candidate_hash_a, leaf_a.hash), (candidate_hash_b, leaf_a.hash)],
+				vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_c, leaf_a.hash)],
 			)
 			.await;
 
-			get_backable_candidates(
-				&mut virtual_overseer,
-				&leaf_a,
-				1.into(),
-				vec![candidate_hash_a, candidate_hash_c, candidate_hash_d].into_iter().collect(),
-				2,
-				vec![(candidate_hash_h, leaf_a.hash), (candidate_hash_i, leaf_a.hash)],
-			)
-			.await;
+			// If the requested count exceeds the largest chain, return the longest
+			// chain we can get.
+			for count in 3..10 {
+				get_backable_candidates(
+					&mut virtual_overseer,
+					&leaf_a,
+					1.into(),
+					vec![candidate_hash_a].into_iter().collect(),
+					count,
+					vec![
+						(candidate_hash_b, leaf_a.hash),
+						(candidate_hash_c, leaf_a.hash),
+						(candidate_hash_d, leaf_a.hash),
+					],
+				)
+				.await;
+			}
+		}
 
-			// Parachain fork.
+		// ancestor count 2 and higher
+		{
 			get_backable_candidates(
 				&mut virtual_overseer,
 				&leaf_a,
 				1.into(),
 				vec![candidate_hash_a, candidate_hash_b, candidate_hash_c].into_iter().collect(),
 				1,
-				vec![],
+				vec![(candidate_hash_d, leaf_a.hash)],
 			)
 			.await;
 
-			// Non-existent candidate.
 			get_backable_candidates(
 				&mut virtual_overseer,
 				&leaf_a,
 				1.into(),
-				vec![candidate_hash_a, CandidateHash(Hash::from_low_u64_be(100))]
-					.into_iter()
-					.collect(),
-				2,
-				vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_d, leaf_a.hash)],
+				vec![candidate_hash_a, candidate_hash_b].into_iter().collect(),
+				1,
+				vec![(candidate_hash_c, leaf_a.hash)],
 			)
 			.await;
 
-			// Requested count is zero.
-			get_backable_candidates(
-				&mut virtual_overseer,
-				&leaf_a,
-				1.into(),
-				Ancestors::new(),
-				0,
-				vec![],
-			)
-			.await;
-			get_backable_candidates(
-				&mut virtual_overseer,
-				&leaf_a,
-				1.into(),
-				vec![candidate_hash_a].into_iter().collect(),
-				0,
-				vec![],
-			)
-			.await;
+			// If the requested count exceeds the largest chain, return the longest
+			// chain we can get.
+			for count in 3..10 {
+				get_backable_candidates(
+					&mut virtual_overseer,
+					&leaf_a,
+					1.into(),
+					vec![candidate_hash_a, candidate_hash_b].into_iter().collect(),
+					count,
+					vec![(candidate_hash_c, leaf_a.hash), (candidate_hash_d, leaf_a.hash)],
+				)
+				.await;
+			}
+		}
+
+		// No more candidates in the chain.
+		for count in 1..4 {
 			get_backable_candidates(
 				&mut virtual_overseer,
 				&leaf_a,
 				1.into(),
-				vec![candidate_hash_a, candidate_hash_b].into_iter().collect(),
-				0,
+				vec![candidate_hash_a, candidate_hash_b, candidate_hash_c, candidate_hash_d]
+					.into_iter()
+					.collect(),
+				count,
 				vec![],
 			)
 			.await;
+		}
+
+		// Wrong paths.
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			1.into(),
+			vec![candidate_hash_b].into_iter().collect(),
+			1,
+			vec![(candidate_hash_a, leaf_a.hash)],
+		)
+		.await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			1.into(),
+			vec![candidate_hash_b, candidate_hash_c].into_iter().collect(),
+			3,
+			vec![
+				(candidate_hash_a, leaf_a.hash),
+				(candidate_hash_b, leaf_a.hash),
+				(candidate_hash_c, leaf_a.hash),
+			],
+		)
+		.await;
+
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			1.into(),
+			vec![candidate_hash_a, candidate_hash_c, candidate_hash_d].into_iter().collect(),
+			2,
+			vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_c, leaf_a.hash)],
+		)
+		.await;
 
-			virtual_overseer
-		});
+		// Non-existent candidate.
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			1.into(),
+			vec![candidate_hash_a, CandidateHash(Hash::from_low_u64_be(100))]
+				.into_iter()
+				.collect(),
+			2,
+			vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_c, leaf_a.hash)],
+		)
+		.await;
 
-		assert_eq!(view.active_leaves.len(), 1);
-		assert_eq!(view.candidate_storage.len(), 2);
-		// 10 candidates and 7 parents on para 1.
-		assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (7, 10));
-		assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0));
-	}
+		// Requested count is zero.
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			1.into(),
+			Ancestors::new(),
+			0,
+			vec![],
+		)
+		.await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			1.into(),
+			vec![candidate_hash_a].into_iter().collect(),
+			0,
+			vec![],
+		)
+		.await;
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			1.into(),
+			vec![candidate_hash_a, candidate_hash_b].into_iter().collect(),
+			0,
+			vec![],
+		)
+		.await;
+
+		virtual_overseer
+	});
+
+	assert_eq!(view.active_leaves.len(), 1);
+	assert_eq!(view.candidate_storage.len(), 2);
+	// 4 parents and 4 candidates on para 1.
+	assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (4, 4));
+	assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0));
 }
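A note on the `Ancestors` arguments used throughout these tests: as the provisioner doc comment further below states, the ancestors passed to `GetBackableCandidates` need not be ordered. The following is a minimal, self-contained sketch of that calling convention, assuming `Ancestors` is a hash-set of candidate hashes; the real alias and `CandidateHash` type are defined outside this diff.

use std::collections::HashSet;

// Illustrative stand-ins; the real `CandidateHash` and `Ancestors` types live outside this diff.
type CandidateHash = [u8; 32];
type Ancestors = HashSet<CandidateHash>;

fn main() {
	let candidate_hash_a: CandidateHash = [1u8; 32];
	let candidate_hash_b: CandidateHash = [2u8; 32];

	// Insertion order does not matter: both sets describe the same already-backed
	// prefix of the fragment chain.
	let ancestors_ab: Ancestors = vec![candidate_hash_a, candidate_hash_b].into_iter().collect();
	let ancestors_ba: Ancestors = vec![candidate_hash_b, candidate_hash_a].into_iter().collect();
	assert_eq!(ancestors_ab, ancestors_ba);

	// An empty set requests backable candidates starting from the base of the chain.
	let from_base = Ancestors::new();
	assert!(from_base.is_empty());
}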
 
-// Test depth query.
+// Test hypothetical membership query.
 #[test]
-fn check_hypothetical_frontier_query() {
+fn check_hypothetical_membership_query() {
 	let test_state = TestState::default();
 	let view = test_harness(|mut virtual_overseer| async move {
+		// Leaf B
+		let leaf_b = TestLeaf {
+			number: 101,
+			hash: Hash::from_low_u64_be(131),
+			para_data: vec![
+				(1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))),
+				(2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))),
+			],
+		};
 		// Leaf A
 		let leaf_a = TestLeaf {
 			number: 100,
-			hash: Hash::from_low_u64_be(130),
+			hash: get_parent_hash(leaf_b.hash),
 			para_data: vec![
-				(1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))),
+				(1.into(), PerParaData::new(98, HeadData(vec![1, 2, 3]))),
 				(2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))),
 			],
 		};
 
 		// Activate leaves.
-		activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await;
+		activate_leaf_with_params(
+			&mut virtual_overseer,
+			&leaf_a,
+			&test_state,
+			AsyncBackingParams { allowed_ancestry_len: 3, max_candidate_depth: 1 },
+		)
+		.await;
+		activate_leaf_with_params(
+			&mut virtual_overseer,
+			&leaf_b,
+			&test_state,
+			AsyncBackingParams { allowed_ancestry_len: 3, max_candidate_depth: 1 },
+		)
+		.await;
+
+		// Candidates will be valid on both leaves.
 
 		// Candidate A.
 		let (candidate_a, pvd_a) = make_candidate(
@@ -1415,7 +1788,6 @@ fn check_hypothetical_frontier_query() {
 			HeadData(vec![1]),
 			test_state.validation_code_hash,
 		);
-		let candidate_hash_a = candidate_a.hash();
 
 		// Candidate B.
 		let (candidate_b, pvd_b) = make_candidate(
@@ -1426,7 +1798,6 @@ fn check_hypothetical_frontier_query() {
 			HeadData(vec![2]),
 			test_state.validation_code_hash,
 		);
-		let candidate_hash_b = candidate_b.hash();
 
 		// Candidate C.
 		let (candidate_c, pvd_c) = make_candidate(
@@ -1437,127 +1808,99 @@ fn check_hypothetical_frontier_query() {
 			HeadData(vec![3]),
 			test_state.validation_code_hash,
 		);
-		let candidate_hash_c = candidate_c.hash();
 
-		// Get hypothetical frontier of candidate A before adding it.
-		get_hypothetical_frontier(
-			&mut virtual_overseer,
-			candidate_hash_a,
-			candidate_a.clone(),
-			pvd_a.clone(),
-			leaf_a.hash,
-			false,
-			vec![0],
-		)
-		.await;
-		// Should work with `backed_in_path_only: true`, too.
-		get_hypothetical_frontier(
-			&mut virtual_overseer,
-			candidate_hash_a,
-			candidate_a.clone(),
-			pvd_a.clone(),
-			leaf_a.hash,
-			true,
-			vec![0],
-		)
-		.await;
+		// Get hypothetical membership of candidates before adding candidate A.
+		// Candidate A can be added directly; candidates B and C are potential candidates.
+		for (candidate, pvd) in [
+			(candidate_a.clone(), pvd_a.clone()),
+			(candidate_b.clone(), pvd_b.clone()),
+			(candidate_c.clone(), pvd_c.clone()),
+		] {
+			get_hypothetical_membership(
+				&mut virtual_overseer,
+				candidate.hash(),
+				candidate,
+				pvd,
+				vec![leaf_a.hash, leaf_b.hash],
+			)
+			.await;
+		}
 
 		// Add candidate A.
-		introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()).await;
-
-		// Get frontier of candidate A after adding it.
-		get_hypothetical_frontier(
-			&mut virtual_overseer,
-			candidate_hash_a,
-			candidate_a.clone(),
-			pvd_a.clone(),
-			leaf_a.hash,
-			false,
-			vec![0],
-		)
-		.await;
-
-		// Get hypothetical frontier of candidate B before adding it.
-		get_hypothetical_frontier(
-			&mut virtual_overseer,
-			candidate_hash_b,
-			candidate_b.clone(),
-			pvd_b.clone(),
-			leaf_a.hash,
-			false,
-			vec![1],
-		)
-		.await;
-
-		// Add candidate B.
-		introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone()).await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone())
+			.await;
 
-		// Get frontier of candidate B after adding it.
-		get_hypothetical_frontier(
-			&mut virtual_overseer,
-			candidate_hash_b,
-			candidate_b,
-			pvd_b.clone(),
-			leaf_a.hash,
-			false,
-			vec![1],
-		)
-		.await;
+		// Get membership of candidates after adding A. C is not a potential candidate because we
+		// may only add one more candidate, which must be a connected candidate.
+		for (candidate, pvd) in
+			[(candidate_a.clone(), pvd_a.clone()), (candidate_b.clone(), pvd_b.clone())]
+		{
+			get_hypothetical_membership(
+				&mut virtual_overseer,
+				candidate.hash(),
+				candidate,
+				pvd,
+				vec![leaf_a.hash, leaf_b.hash],
+			)
+			.await;
+		}
 
-		// Get hypothetical frontier of candidate C before adding it.
-		get_hypothetical_frontier(
-			&mut virtual_overseer,
-			candidate_hash_c,
-			candidate_c.clone(),
-			pvd_c.clone(),
-			leaf_a.hash,
-			false,
-			vec![2],
-		)
-		.await;
-		// Should be empty with `backed_in_path_only` because we haven't backed anything.
-		get_hypothetical_frontier(
+		get_hypothetical_membership(
 			&mut virtual_overseer,
-			candidate_hash_c,
+			candidate_c.hash(),
 			candidate_c.clone(),
 			pvd_c.clone(),
-			leaf_a.hash,
-			true,
 			vec![],
 		)
 		.await;
 
-		// Add candidate C.
-		introduce_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c.clone()).await;
+		// Candidate D has an invalid relay parent.
+		let (candidate_d, pvd_d) = make_candidate(
+			Hash::from_low_u64_be(200),
+			leaf_a.number,
+			1.into(),
+			HeadData(vec![1]),
+			HeadData(vec![2]),
+			test_state.validation_code_hash,
+		);
+		introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_d, pvd_d).await;
 
-		// Get frontier of candidate C after adding it.
-		get_hypothetical_frontier(
-			&mut virtual_overseer,
-			candidate_hash_c,
-			candidate_c.clone(),
-			pvd_c.clone(),
-			leaf_a.hash,
-			false,
-			vec![2],
-		)
-		.await;
-		// Should be empty with `backed_in_path_only` because we haven't backed anything.
-		get_hypothetical_frontier(
+		// Add candidate B.
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone())
+			.await;
+
+		// Get membership of candidates after adding B.
+		for (candidate, pvd) in
+			[(candidate_a.clone(), pvd_a.clone()), (candidate_b.clone(), pvd_b.clone())]
+		{
+			get_hypothetical_membership(
+				&mut virtual_overseer,
+				candidate.hash(),
+				candidate,
+				pvd,
+				vec![leaf_a.hash, leaf_b.hash],
+			)
+			.await;
+		}
+
+		get_hypothetical_membership(
 			&mut virtual_overseer,
-			candidate_hash_c,
+			candidate_c.hash(),
 			candidate_c.clone(),
 			pvd_c.clone(),
-			leaf_a.hash,
-			true,
 			vec![],
 		)
 		.await;
 
+		// Add candidate C. It will fail because we have enough candidates for the configured depth.
+		introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_c, pvd_c).await;
+
 		virtual_overseer
 	});
 
-	assert_eq!(view.active_leaves.len(), 1);
+	assert_eq!(view.active_leaves.len(), 2);
 	assert_eq!(view.candidate_storage.len(), 2);
+	assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (2, 2));
 }
 
 #[test]
@@ -1618,7 +1961,8 @@ fn check_pvd_query() {
 		.await;
 
 		// Add candidate A.
-		introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()).await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone())
+			.await;
 		back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await;
 
 		// Get pvd of candidate A after adding it.
@@ -1642,7 +1986,7 @@ fn check_pvd_query() {
 		.await;
 
 		// Add candidate B.
-		introduce_candidate(&mut virtual_overseer, candidate_b, pvd_b.clone()).await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_b, pvd_b.clone()).await;
 
 		// Get pvd of candidate B after adding it.
 		get_pvd(
@@ -1665,7 +2009,7 @@ fn check_pvd_query() {
 		.await;
 
 		// Add candidate C.
-		introduce_candidate(&mut virtual_overseer, candidate_c, pvd_c.clone()).await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_c, pvd_c.clone()).await;
 
 		// Get pvd of candidate C after adding it.
 		get_pvd(
@@ -1849,8 +2193,7 @@ fn persists_pending_availability_candidate() {
 		);
 		let candidate_hash_b = candidate_b.hash();
 
-		introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await;
-		second_candidate(&mut virtual_overseer, candidate_a.clone()).await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await;
 		back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await;
 
 		let candidate_a_pending_av = CandidatePendingAvailability {
@@ -1874,8 +2217,7 @@ fn persists_pending_availability_candidate() {
 		};
 		activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await;
 
-		introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await;
-		second_candidate(&mut virtual_overseer, candidate_b.clone()).await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await;
 		back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await;
 
 		get_backable_candidates(
@@ -1942,8 +2284,7 @@ fn backwards_compatible() {
 		);
 		let candidate_hash_a = candidate_a.hash();
 
-		introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await;
-		second_candidate(&mut virtual_overseer, candidate_a.clone()).await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await;
 		back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await;
 
 		get_backable_candidates(
diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs
index 5cfcb96dc2bc710c42400fa49fa888195c79aa48..fa16b38d28bda4e364c5b6d8b74bb85ca727036d 100644
--- a/polkadot/node/core/provisioner/src/lib.rs
+++ b/polkadot/node/core/provisioner/src/lib.rs
@@ -877,7 +877,7 @@ async fn get_block_number_under_construction(
 }
 
 /// Requests backable candidates from Prospective Parachains based on
-/// the given ancestors in the fragment tree. The ancestors may not be ordered.
+/// the given ancestors in the fragment chain. The ancestors may not be ordered.
 async fn get_backable_candidates(
 	relay_parent: Hash,
 	para_id: ParaId,
diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml
index 2c7135742f56890a7ad147e95c545cbebde04c9b..398d2783916fbfbf8b29b6285151d47931cad51e 100644
--- a/polkadot/node/network/collator-protocol/Cargo.toml
+++ b/polkadot/node/network/collator-protocol/Cargo.toml
@@ -32,6 +32,7 @@ tokio-util = "0.7.1"
 log = { workspace = true, default-features = true }
 env_logger = "0.11"
 assert_matches = "1.4.0"
+rstest = "0.18.2"
 
 sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] }
 sp-keyring = { path = "../../../../substrate/primitives/keyring" }
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
index 879caf923285b3341a0bc28bc52371fb9c81ff69..f227e3855fa0a72b268a5be9b6e33d8712111dc1 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
@@ -261,7 +261,7 @@ struct State {
 	/// `active_leaves`, the opposite doesn't hold true.
 	///
 	/// Relay-chain blocks which don't support prospective parachains are
-	/// never included in the fragment trees of active leaves which do. In
+	/// never included in the fragment chains of active leaves which do. In
 	/// particular, this means that if a given relay parent belongs to implicit
 	/// ancestry of some active leaf, then it does support prospective parachains.
 	implicit_view: ImplicitView,
@@ -531,7 +531,7 @@ async fn distribute_collation<Context>(
 	// Otherwise, it should be present in allowed ancestry of some leaf.
 	//
 	// It's collation-producer responsibility to verify that there exists
-	// a hypothetical membership in a fragment tree for candidate.
+	// a hypothetical membership in a fragment chain for the candidate.
 	let interested =
 		state
 			.peer_data
@@ -894,7 +894,7 @@ async fn process_msg<Context>(
 				);
 			}
 		},
-		msg @ (ReportCollator(..) | Invalid(..) | Seconded(..) | Backed { .. }) => {
+		msg @ (ReportCollator(..) | Invalid(..) | Seconded(..)) => {
 			gum::warn!(
 				target: LOG_TARGET,
 				"{:?} message is not expected on the collator side of the protocol",
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs
index 8c3889a3554865c919f2eb33a8f86cce15317ff3..001df1fb3da9b24a3c1acffc049cc7433903aea8 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs
@@ -121,19 +121,15 @@ impl PendingCollation {
 	}
 }
 
-/// v2 or v3 advertisement that was rejected by the backing
-/// subsystem. Validator may fetch it later if its fragment
-/// membership gets recognized before relay parent goes out of view.
-#[derive(Debug, Clone)]
-pub struct BlockedAdvertisement {
-	/// Peer that advertised the collation.
-	pub peer_id: PeerId,
-	/// Collator id.
-	pub collator_id: CollatorId,
-	/// The relay-parent of the candidate.
-	pub candidate_relay_parent: Hash,
-	/// Hash of the candidate.
-	pub candidate_hash: CandidateHash,
+/// An identifier for a fetched collation that was blocked from being seconded because we don't have
+/// access to the parent's HeadData. Can be retried once the candidate outputting this head data is
+/// seconded.
+#[derive(Debug, Clone, Eq, PartialEq, Hash)]
+pub struct BlockedCollationId {
+	/// Para id.
+	pub para_id: ParaId,
+	/// Hash of the parent head data.
+	pub parent_head_data_hash: Hash,
 }
 
 /// Performs a sanity check between advertised and fetched collations.
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
index ac8c060827f5a4519cd8e4930d1447a8bf4cbe47..9f037a983e51c33cb734ee65fc496541d1082bf2 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
@@ -59,15 +59,17 @@ use polkadot_primitives::{
 
 use crate::error::{Error, FetchError, Result, SecondingError};
 
+use self::collation::BlockedCollationId;
+
 use super::{modify_reputation, tick_stream, LOG_TARGET};
 
 mod collation;
 mod metrics;
 
 use collation::{
-	fetched_collation_sanity_check, BlockedAdvertisement, CollationEvent, CollationFetchError,
-	CollationFetchRequest, CollationStatus, Collations, FetchedCollation, PendingCollation,
-	PendingCollationFetch, ProspectiveCandidate,
+	fetched_collation_sanity_check, CollationEvent, CollationFetchError, CollationFetchRequest,
+	CollationStatus, Collations, FetchedCollation, PendingCollation, PendingCollationFetch,
+	ProspectiveCandidate,
 };
 
 #[cfg(test)]
@@ -388,7 +390,7 @@ struct State {
 	/// `active_leaves`, the opposite doesn't hold true.
 	///
 	/// Relay-chain blocks which don't support prospective parachains are
-	/// never included in the fragment trees of active leaves which do. In
+	/// never included in the fragment chains of active leaves which do. In
 	/// particular, this means that if a given relay parent belongs to implicit
 	/// ancestry of some active leaf, then it does support prospective parachains.
 	implicit_view: ImplicitView,
@@ -421,14 +423,6 @@ struct State {
 	/// Span per relay parent.
 	span_per_relay_parent: HashMap<Hash, PerLeafSpan>,
 
-	/// Advertisements that were accepted as valid by collator protocol but rejected by backing.
-	///
-	/// It's only legal to fetch collations that are either built on top of the root
-	/// of some fragment tree or have a parent node which represents backed candidate.
-	/// Otherwise, a validator will keep such advertisement in the memory and re-trigger
-	/// requests to backing on new backed candidates and activations.
-	blocked_advertisements: HashMap<(ParaId, Hash), Vec<BlockedAdvertisement>>,
-
 	/// When a timer in this `FuturesUnordered` triggers, we should dequeue the next request
 	/// attempt in the corresponding `collations_per_relay_parent`.
 	///
@@ -441,6 +435,12 @@ struct State {
 	/// on validation.
 	fetched_candidates: HashMap<FetchedCollation, CollationEvent>,
 
+	/// Collations which we haven't been able to second yet because their parent is not known to
+	/// prospective-parachains. Keyed by the para id and the hash of the parent head data (see
+	/// `BlockedCollationId`). Only needed for async backing. For elastic scaling, the fetched
+	/// collation must contain the full parent head data.
+	blocked_from_seconding: HashMap<BlockedCollationId, Vec<PendingCollationFetch>>,
+
 	/// Aggregated reputation change
 	reputation: ReputationAggregator,
 }
@@ -953,6 +953,8 @@ enum AdvertisementError {
 	/// Advertisement is invalid.
 	#[allow(dead_code)]
 	Invalid(InsertAdvertisementError),
+	/// Seconding is not allowed by the backing subsystem.
+	BlockedByBacking,
 }
 
 impl AdvertisementError {
@@ -962,7 +964,7 @@ impl AdvertisementError {
 			InvalidAssignment => Some(COST_WRONG_PARA),
 			ProtocolMisuse => Some(COST_PROTOCOL_MISUSE),
 			RelayParentUnknown | UndeclaredCollator | Invalid(_) => Some(COST_UNEXPECTED_MESSAGE),
-			UnknownPeer | SecondedLimitReached => None,
+			UnknownPeer | SecondedLimitReached | BlockedByBacking => None,
 		}
 	}
 }
@@ -1001,57 +1003,55 @@ where
 	})
 }
 
-/// Checks whether any of the advertisements are unblocked and attempts to fetch them.
-async fn request_unblocked_collations<Sender, I>(sender: &mut Sender, state: &mut State, blocked: I)
-where
-	Sender: CollatorProtocolSenderTrait,
-	I: IntoIterator<Item = ((ParaId, Hash), Vec<BlockedAdvertisement>)>,
-{
-	let _timer = state.metrics.time_request_unblocked_collations();
+// Try seconding any collations which were waiting for their parent candidate to be seconded.
+#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
+async fn second_unblocked_collations<Context>(
+	ctx: &mut Context,
+	state: &mut State,
+	para_id: ParaId,
+	head_data: HeadData,
+	head_data_hash: Hash,
+) {
+	if let Some(unblocked_collations) = state
+		.blocked_from_seconding
+		.remove(&BlockedCollationId { para_id, parent_head_data_hash: head_data_hash })
+	{
+		if !unblocked_collations.is_empty() {
+			gum::debug!(
+				target: LOG_TARGET,
+				"Candidate outputting head data with hash {} unblocked {} collations for seconding.",
+				head_data_hash,
+				unblocked_collations.len()
+			);
+		}
 
-	for (key, mut value) in blocked {
-		let (para_id, para_head) = key;
-		let blocked = std::mem::take(&mut value);
-		for blocked in blocked {
-			let is_seconding_allowed = can_second(
-				sender,
-				para_id,
-				blocked.candidate_relay_parent,
-				blocked.candidate_hash,
-				para_head,
-			)
-			.await;
+		for mut unblocked_collation in unblocked_collations {
+			unblocked_collation.maybe_parent_head_data = Some(head_data.clone());
+			let peer_id = unblocked_collation.collation_event.pending_collation.peer_id;
+			let relay_parent = unblocked_collation.candidate_receipt.descriptor.relay_parent;
 
-			if is_seconding_allowed {
-				let result = enqueue_collation(
-					sender,
-					state,
-					blocked.candidate_relay_parent,
-					para_id,
-					blocked.peer_id,
-					blocked.collator_id,
-					Some((blocked.candidate_hash, para_head)),
-				)
-				.await;
-				if let Err(fetch_error) = result {
-					gum::debug!(
-						target: LOG_TARGET,
-						relay_parent = ?blocked.candidate_relay_parent,
-						para_id = ?para_id,
-						peer_id = ?blocked.peer_id,
-						error = %fetch_error,
-						"Failed to request unblocked collation",
-					);
+			if let Err(err) = kick_off_seconding(ctx, state, unblocked_collation).await {
+				gum::warn!(
+					target: LOG_TARGET,
+					?relay_parent,
+					?para_id,
+					?peer_id,
+					error = %err,
+					"Seconding aborted due to an error",
+				);
+
+				if err.is_malicious() {
+					// Report malicious peer.
+					modify_reputation(
+						&mut state.reputation,
+						ctx.sender(),
+						peer_id,
+						COST_REPORT_BAD,
+					)
+					.await;
 				}
-			} else {
-				// Keep the advertisement.
-				value.push(blocked);
 			}
 		}
-
-		if !value.is_empty() {
-			state.blocked_advertisements.insert(key, value);
-		}
 	}
 }
 
@@ -1110,10 +1110,10 @@ where
 	}
 
 	if let Some((candidate_hash, parent_head_data_hash)) = prospective_candidate {
-		// We need to queue the advertisement if we are not allowed to second it.
+		// Check whether the backing subsystem allows us to second this candidate.
 		//
-		// This is also only important when async backing is enabled.
-		let queue_advertisement = relay_parent_mode.is_enabled() &&
+		// This is only relevant when async backing or elastic scaling is enabled.
+		let seconding_not_allowed = relay_parent_mode.is_enabled() &&
 			!can_second(
 				sender,
 				collator_para_id,
@@ -1123,26 +1123,8 @@ where
 			)
 			.await;
 
-		if queue_advertisement {
-			gum::debug!(
-				target: LOG_TARGET,
-				relay_parent = ?relay_parent,
-				para_id = ?para_id,
-				?candidate_hash,
-				"Seconding is not allowed by backing, queueing advertisement",
-			);
-			state
-				.blocked_advertisements
-				.entry((collator_para_id, parent_head_data_hash))
-				.or_default()
-				.push(BlockedAdvertisement {
-					peer_id,
-					collator_id: collator_id.clone(),
-					candidate_relay_parent: relay_parent,
-					candidate_hash,
-				});
-
-			return Ok(())
+		if seconding_not_allowed {
+			return Err(AdvertisementError::BlockedByBacking)
 		}
 	}
 
@@ -1358,20 +1340,17 @@ where
 			state.span_per_relay_parent.remove(&removed);
 		}
 	}
-	// Remove blocked advertisements that left the view.
-	state.blocked_advertisements.retain(|_, ads| {
-		ads.retain(|ad| state.per_relay_parent.contains_key(&ad.candidate_relay_parent));
 
-		!ads.is_empty()
+	// Remove blocked seconding requests whose relay parent has left the view.
+	state.blocked_from_seconding.retain(|_, collations| {
+		collations.retain(|collation| {
+			state
+				.per_relay_parent
+				.contains_key(&collation.candidate_receipt.descriptor.relay_parent)
+		});
+
+		!collations.is_empty()
 	});
-	// Re-trigger previously failed requests again.
-	//
-	// This makes sense for several reasons, one simple example: if a hypothetical depth
-	// for an advertisement initially exceeded the limit and the candidate was included
-	// in a new leaf.
-	let maybe_unblocked = std::mem::take(&mut state.blocked_advertisements);
-	// Could be optimized to only sanity check new leaves.
-	request_unblocked_collations(sender, state, maybe_unblocked).await;
 
 	for (peer_id, peer_data) in state.peer_data.iter_mut() {
 		peer_data.prune_old_advertisements(
@@ -1508,6 +1487,8 @@ async fn process_msg<Context>(
 					return
 				},
 			};
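+			// Keep the seconded candidate's output head data; it may unblock child collations
+			// that were waiting for it as their parent head data.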
+			let output_head_data = receipt.commitments.head_data.clone();
+			let output_head_data_hash = receipt.descriptor.para_head;
 			let fetched_collation = FetchedCollation::from(&receipt.to_plain());
 			if let Some(CollationEvent { collator_id, pending_collation, .. }) =
 				state.fetched_candidates.remove(&fetched_collation)
@@ -1536,6 +1517,17 @@ async fn process_msg<Context>(
 					rp_state.collations.status = CollationStatus::Seconded;
 					rp_state.collations.note_seconded();
 				}
+
+				// See if we've unblocked other collations for seconding.
+				second_unblocked_collations(
+					ctx,
+					state,
+					fetched_collation.para_id,
+					output_head_data,
+					output_head_data_hash,
+				)
+				.await;
+
 				// If async backing is enabled, make an attempt to fetch next collation.
 				let maybe_candidate_hash =
 					prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash);
@@ -1554,11 +1546,13 @@ async fn process_msg<Context>(
 				);
 			}
 		},
-		Backed { para_id, para_head } => {
-			let maybe_unblocked = state.blocked_advertisements.remove_entry(&(para_id, para_head));
-			request_unblocked_collations(ctx.sender(), state, maybe_unblocked).await;
-		},
 		Invalid(parent, candidate_receipt) => {
+			// Remove collations which were blocked from seconding and had this candidate as parent.
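+			// Since this candidate is invalid, it will not be seconded and cannot unblock them.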
+			state.blocked_from_seconding.remove(&BlockedCollationId {
+				para_id: candidate_receipt.descriptor.para_id,
+				parent_head_data_hash: candidate_receipt.descriptor.para_head,
+			});
+
 			let fetched_collation = FetchedCollation::from(&candidate_receipt);
 			let candidate_hash = fetched_collation.candidate_hash;
 			let id = match state.fetched_candidates.entry(fetched_collation) {
@@ -1668,29 +1662,45 @@ async fn run_inner<Context>(
 				};
 
 				let CollationEvent {collator_id, pending_collation, .. } = res.collation_event.clone();
-				if let Err(err) = kick_off_seconding(&mut ctx, &mut state, res).await {
-					gum::warn!(
-						target: LOG_TARGET,
-						relay_parent = ?pending_collation.relay_parent,
-						para_id = ?pending_collation.para_id,
-						peer_id = ?pending_collation.peer_id,
-						error = %err,
-						"Seconding aborted due to an error",
-					);
 
-					if err.is_malicious() {
-						// Report malicious peer.
-						modify_reputation(&mut state.reputation, ctx.sender(), pending_collation.peer_id, COST_REPORT_BAD).await;
+				match kick_off_seconding(&mut ctx, &mut state, res).await {
+					Err(err) => {
+						gum::warn!(
+							target: LOG_TARGET,
+							relay_parent = ?pending_collation.relay_parent,
+							para_id = ?pending_collation.para_id,
+							peer_id = ?pending_collation.peer_id,
+							error = %err,
+							"Seconding aborted due to an error",
+						);
+
+						if err.is_malicious() {
+							// Report malicious peer.
+							modify_reputation(&mut state.reputation, ctx.sender(), pending_collation.peer_id, COST_REPORT_BAD).await;
+						}
+						let maybe_candidate_hash =
+							pending_collation.prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash);
+						dequeue_next_collation_and_fetch(
+							&mut ctx,
+							&mut state,
+							pending_collation.relay_parent,
+							(collator_id, maybe_candidate_hash),
+						)
+						.await;
+					},
+					Ok(false) => {
+						// No hard error occurred, but we can try fetching another collation.
+						let maybe_candidate_hash =
+							pending_collation.prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash);
+						dequeue_next_collation_and_fetch(
+							&mut ctx,
+							&mut state,
+							pending_collation.relay_parent,
+							(collator_id, maybe_candidate_hash),
+						)
+						.await;
 					}
-					let maybe_candidate_hash =
-					pending_collation.prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash);
-					dequeue_next_collation_and_fetch(
-						&mut ctx,
-						&mut state,
-						pending_collation.relay_parent,
-						(collator_id, maybe_candidate_hash),
-					)
-					.await;
+					Ok(true) => {}
 				}
 			}
 			res = state.collation_fetch_timeouts.select_next_some() => {
@@ -1800,12 +1810,13 @@ where
 }
 
 /// Handle a fetched collation result.
+/// Returns whether seconding has begun.
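+///
+/// `Ok(false)` means seconding was not started, e.g. because the relay parent is out of view,
+/// the protocol versions mismatch, or the collation was parked in `blocked_from_seconding`
+/// until its parent is seconded; in that case the caller is free to fetch the next collation.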
 #[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
 async fn kick_off_seconding<Context>(
 	ctx: &mut Context,
 	state: &mut State,
 	PendingCollationFetch { mut collation_event, candidate_receipt, pov, maybe_parent_head_data }: PendingCollationFetch,
-) -> std::result::Result<(), SecondingError> {
+) -> std::result::Result<bool, SecondingError> {
 	let pending_collation = collation_event.pending_collation;
 	let relay_parent = pending_collation.relay_parent;
 
@@ -1818,7 +1829,7 @@ async fn kick_off_seconding<Context>(
 				relay_parent = ?relay_parent,
 				"Fetched collation for a parent out of view",
 			);
-			return Ok(())
+			return Ok(false)
 		},
 	};
 	let collations = &mut per_relay_parent.collations;
@@ -1828,7 +1839,7 @@ async fn kick_off_seconding<Context>(
 		collation_event.pending_collation.commitments_hash =
 			Some(candidate_receipt.commitments_hash);
 
-		let (maybe_pvd, maybe_parent_head_and_hash) = match (
+		let (maybe_pvd, maybe_parent_head, maybe_parent_head_hash) = match (
 			collation_event.collator_protocol_version,
 			collation_event.pending_collation.prospective_candidate,
 		) {
@@ -1844,7 +1855,7 @@ async fn kick_off_seconding<Context>(
 				)
 				.await?;
 
-				(pvd, maybe_parent_head_data.map(|head_data| (head_data, parent_head_data_hash)))
+				(pvd, maybe_parent_head_data, Some(parent_head_data_hash))
 			},
 			// Support V2 collators without async backing enabled.
 			(CollationVersion::V2, Some(_)) | (CollationVersion::V1, _) => {
@@ -1854,20 +1865,60 @@ async fn kick_off_seconding<Context>(
 					candidate_receipt.descriptor().para_id,
 				)
 				.await?;
-				(pvd, None)
+				(
+					Some(pvd.ok_or(SecondingError::PersistedValidationDataNotFound)?),
+					maybe_parent_head_data,
+					None,
+				)
 			},
 			_ => {
 				// `handle_advertisement` checks for protocol mismatch.
-				return Ok(())
+				return Ok(false)
+			},
+		};
+
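+		// Resolve the persisted validation data: if it could not be fetched but the parent head
+		// data hash is known, park the collation until its parent is seconded; otherwise a
+		// missing PVD is an error.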
+		let pvd = match (maybe_pvd, maybe_parent_head.clone(), maybe_parent_head_hash) {
+			(Some(pvd), _, _) => pvd,
+			(None, None, Some(parent_head_data_hash)) => {
+				// The collator did not supply the parent head data and neither could
+				// prospective-parachains. Park this collation in `blocked_from_seconding`
+				// until its parent is seconded.
+				let blocked_collation = PendingCollationFetch {
+					collation_event,
+					candidate_receipt,
+					pov,
+					maybe_parent_head_data: None,
+				};
+				gum::debug!(
+					target: LOG_TARGET,
+					candidate_hash = ?blocked_collation.candidate_receipt.hash(),
+					relay_parent = ?blocked_collation.candidate_receipt.descriptor.relay_parent,
+					"Collation with parent head data hash {} is blocked from seconding, waiting for its parent to be validated.",
+					parent_head_data_hash
+				);
+				state
+					.blocked_from_seconding
+					.entry(BlockedCollationId {
+						para_id: blocked_collation.candidate_receipt.descriptor.para_id,
+						parent_head_data_hash,
+					})
+					.or_insert_with(Vec::new)
+					.push(blocked_collation);
+
+				return Ok(false)
+			},
+			(None, _, _) => {
+				// We already have the parent head data, yet the PVD fetch failed; waiting for
+				// another collation that outputs this head data to be seconded would not help.
+				return Err(SecondingError::PersistedValidationDataNotFound)
 			},
 		};
-		let pvd = maybe_pvd.ok_or(SecondingError::PersistedValidationDataNotFound)?;
 
 		fetched_collation_sanity_check(
 			&collation_event.pending_collation,
 			&candidate_receipt,
 			&pvd,
-			maybe_parent_head_and_hash,
+			maybe_parent_head.and_then(|head| maybe_parent_head_hash.map(|hash| (head, hash))),
 		)?;
 
 		ctx.send_message(CandidateBackingMessage::Second(
@@ -1882,7 +1933,7 @@ async fn kick_off_seconding<Context>(
 		collations.status = CollationStatus::WaitingOnValidation;
 
 		entry.insert(collation_event);
-		Ok(())
+		Ok(true)
 	} else {
 		Err(SecondingError::Duplicate)
 	}
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs
index eaa725f2642ed38e3a6f222f9624dd6e2bc4fcce..785690121dadd1858a4985459b515739842a1421 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs
@@ -23,6 +23,7 @@ use polkadot_primitives::{
 	AsyncBackingParams, BlockNumber, CandidateCommitments, CommittedCandidateReceipt, Header,
 	SigningContext, ValidatorId,
 };
+use rstest::rstest;
 
 const ASYNC_BACKING_PARAMETERS: AsyncBackingParams =
 	AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 };
@@ -262,6 +263,48 @@ async fn assert_collation_seconded(
 	}
 }
 
+/// Assert that the next message is a persisted validation data request and respond with the
+/// supplied PVD.
+async fn assert_persisted_validation_data(
+	virtual_overseer: &mut VirtualOverseer,
+	version: CollationVersion,
+	expected_relay_parent: Hash,
+	expected_para_id: ParaId,
+	expected_parent_head_data_hash: Option<Hash>,
+	pvd: Option<PersistedValidationData>,
+) {
+	// Depending on the collation protocol version, the PVD is requested either from the
+	// Runtime API (V1) or from Prospective Parachains (V2).
+	let msg = overseer_recv(virtual_overseer).await;
+	match version {
+		CollationVersion::V1 => assert_matches!(
+			msg,
+			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				hash,
+				RuntimeApiRequest::PersistedValidationData(para_id, assumption, tx),
+			)) => {
+				assert_eq!(expected_relay_parent, hash);
+				assert_eq!(expected_para_id, para_id);
+				assert_eq!(OccupiedCoreAssumption::Free, assumption);
+				tx.send(Ok(pvd)).unwrap();
+			}
+		),
+		CollationVersion::V2 => assert_matches!(
+			msg,
+			AllMessages::ProspectiveParachains(
+				ProspectiveParachainsMessage::GetProspectiveValidationData(request, tx),
+			) => {
+				assert_eq!(expected_relay_parent, request.candidate_relay_parent);
+				assert_eq!(expected_para_id, request.para_id);
+				if let Some(expected_parent_head_data_hash) = expected_parent_head_data_hash {
+					assert_eq!(expected_parent_head_data_hash, request.parent_head_data.hash());
+				}
+				tx.send(pvd).unwrap();
+			}
+		),
+	}
+}
+
 #[test]
 fn v1_advertisement_accepted_and_seconded() {
 	let test_state = TestState::default();
@@ -946,56 +989,73 @@ fn advertisement_spam_protection() {
 	});
 }
 
-#[test]
-fn backed_candidate_unblocks_advertisements() {
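+// Test that a collation is blocked from seconding while its parent head data is unknown, and is
+// only seconded after its parent has been seconded (or dropped if the parent is found invalid).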
+#[rstest]
+#[case(true)]
+#[case(false)]
+fn child_blocked_from_seconding_by_parent(#[case] valid_parent: bool) {
 	let test_state = TestState::default();
 
 	test_harness(ReputationAggregator::new(|_| true), |test_harness| async move {
-		let TestHarness { mut virtual_overseer, .. } = test_harness;
+		let TestHarness { mut virtual_overseer, keystore } = test_harness;
 
-		let pair_a = CollatorPair::generate().0;
-		let pair_b = CollatorPair::generate().0;
+		let pair = CollatorPair::generate().0;
 
+		// Grandparent of head `a`.
 		let head_b = Hash::from_low_u64_be(128);
 		let head_b_num: u32 = 2;
 
-		let head_c = get_parent_hash(head_b);
 		// Grandparent of head `b`.
-		// Group rotation frequency is 1 by default, at `d` we're assigned
+		// Group rotation frequency is 1 by default, at `c` we're assigned
 		// to the first para.
-		let head_d = get_parent_hash(head_c);
+		let head_c = Hash::from_low_u64_be(130);
 
 		// Activated leaf is `b`, but the collation will be based on `c`.
 		update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await;
 
 		let peer_a = PeerId::random();
-		let peer_b = PeerId::random();
 
-		// Accept both collators from the implicit view.
 		connect_and_declare_collator(
 			&mut virtual_overseer,
 			peer_a,
-			pair_a.clone(),
+			pair.clone(),
 			test_state.chain_ids[0],
 			CollationVersion::V2,
 		)
 		.await;
-		connect_and_declare_collator(
-			&mut virtual_overseer,
-			peer_b,
-			pair_b.clone(),
-			test_state.chain_ids[1],
-			CollationVersion::V2,
-		)
-		.await;
 
-		let candidate_hash = CandidateHash::default();
-		let parent_head_data_hash = Hash::zero();
+		// Candidate A transitions from head data 0 to 1.
+		// Candidate B transitions from head data 1 to 2.
+
+		// Candidate B is advertised and fetched before candidate A.
+
+		let mut candidate_b = dummy_candidate_receipt_bad_sig(head_c, Some(Default::default()));
+		candidate_b.descriptor.para_id = test_state.chain_ids[0];
+		candidate_b.descriptor.para_head = HeadData(vec![2]).hash();
+		candidate_b.descriptor.persisted_validation_data_hash =
+			PersistedValidationData::<Hash, BlockNumber> {
+				parent_head: HeadData(vec![1]),
+				relay_parent_number: 5,
+				max_pov_size: 1024,
+				relay_parent_storage_root: Default::default(),
+			}
+			.hash();
+		let candidate_b_commitments = CandidateCommitments {
+			head_data: HeadData(vec![2]),
+			horizontal_messages: Default::default(),
+			upward_messages: Default::default(),
+			new_validation_code: None,
+			processed_downward_messages: 0,
+			hrmp_watermark: 0,
+		};
+		candidate_b.commitments_hash = candidate_b_commitments.hash();
+
+		let candidate_b_hash = candidate_b.hash();
+
 		advertise_collation(
 			&mut virtual_overseer,
-			peer_b,
+			peer_a,
 			head_c,
-			Some((candidate_hash, parent_head_data_hash)),
+			Some((candidate_b_hash, HeadData(vec![1]).hash())),
 		)
 		.await;
 		assert_matches!(
@@ -1003,40 +1063,73 @@ fn backed_candidate_unblocks_advertisements() {
 			AllMessages::CandidateBacking(
 				CandidateBackingMessage::CanSecond(request, tx),
 			) => {
-				assert_eq!(request.candidate_hash, candidate_hash);
-				assert_eq!(request.candidate_para_id, test_state.chain_ids[1]);
-				assert_eq!(request.parent_head_data_hash, parent_head_data_hash);
-				// Reject it.
-				tx.send(false).expect("receiving side should be alive");
+				assert_eq!(request.candidate_hash, candidate_b_hash);
+				assert_eq!(request.candidate_para_id, test_state.chain_ids[0]);
+				assert_eq!(request.parent_head_data_hash, HeadData(vec![1]).hash());
+				tx.send(true).expect("receiving side should be alive");
 			}
 		);
 
-		// Advertise with different para.
-		advertise_collation(
+		let response_channel = assert_fetch_collation_request(
 			&mut virtual_overseer,
-			peer_a,
-			head_d, // Note different relay parent.
-			Some((candidate_hash, parent_head_data_hash)),
+			head_c,
+			test_state.chain_ids[0],
+			Some(candidate_b_hash),
 		)
 		.await;
-		assert_matches!(
-			overseer_recv(&mut virtual_overseer).await,
-			AllMessages::CandidateBacking(
-				CandidateBackingMessage::CanSecond(request, tx),
-			) => {
-				assert_eq!(request.candidate_hash, candidate_hash);
-				assert_eq!(request.candidate_para_id, test_state.chain_ids[0]);
-				assert_eq!(request.parent_head_data_hash, parent_head_data_hash);
-				tx.send(false).expect("receiving side should be alive");
+
+		response_channel
+			.send(Ok((
+				request_v2::CollationFetchingResponse::Collation(
+					candidate_b.clone(),
+					PoV { block_data: BlockData(vec![1]) },
+				)
+				.encode(),
+				ProtocolName::from(""),
+			)))
+			.expect("Sending response should succeed");
+
+		// Persisted validation data of candidate B is not found.
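+		// Candidate B is now parked in `blocked_from_seconding`, keyed by its parent head data
+		// hash, until a candidate outputting that head data (candidate A) is seconded.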
+		assert_persisted_validation_data(
+			&mut virtual_overseer,
+			CollationVersion::V2,
+			head_c,
+			test_state.chain_ids[0],
+			Some(HeadData(vec![1]).hash()),
+			None,
+		)
+		.await;
+
+		// Now advertise, fetch and validate candidate A, which is the parent of B.
+
+		let mut candidate_a = dummy_candidate_receipt_bad_sig(head_c, Some(Default::default()));
+		candidate_a.descriptor.para_id = test_state.chain_ids[0];
+		candidate_a.descriptor.para_head = HeadData(vec![1]).hash();
+		candidate_a.descriptor.persisted_validation_data_hash =
+			PersistedValidationData::<Hash, BlockNumber> {
+				parent_head: HeadData(vec![0]),
+				relay_parent_number: 5,
+				max_pov_size: 1024,
+				relay_parent_storage_root: Default::default(),
 			}
-		);
+			.hash();
+		let candidate_a_commitments = CandidateCommitments {
+			head_data: HeadData(vec![1]),
+			horizontal_messages: Default::default(),
+			upward_messages: Default::default(),
+			new_validation_code: None,
+			processed_downward_messages: 0,
+			hrmp_watermark: 0,
+		};
+		candidate_a.commitments_hash = candidate_a_commitments.hash();
+
+		let candidate_a_hash = candidate_a.hash();
 
-		overseer_send(
+		advertise_collation(
 			&mut virtual_overseer,
-			CollatorProtocolMessage::Backed {
-				para_id: test_state.chain_ids[0],
-				para_head: parent_head_data_hash,
-			},
+			peer_a,
+			head_c,
+			Some((candidate_a_hash, HeadData(vec![0]).hash())),
 		)
 		.await;
 		assert_matches!(
@@ -1044,174 +1137,155 @@ fn backed_candidate_unblocks_advertisements() {
 			AllMessages::CandidateBacking(
 				CandidateBackingMessage::CanSecond(request, tx),
 			) => {
-				assert_eq!(request.candidate_hash, candidate_hash);
+				assert_eq!(request.candidate_hash, candidate_a_hash);
 				assert_eq!(request.candidate_para_id, test_state.chain_ids[0]);
-				assert_eq!(request.parent_head_data_hash, parent_head_data_hash);
+				assert_eq!(request.parent_head_data_hash, HeadData(vec![0]).hash());
 				tx.send(true).expect("receiving side should be alive");
 			}
 		);
-		assert_fetch_collation_request(
+
+		let response_channel = assert_fetch_collation_request(
 			&mut virtual_overseer,
-			head_d,
+			head_c,
 			test_state.chain_ids[0],
-			Some(candidate_hash),
+			Some(candidate_a_hash),
 		)
 		.await;
-		virtual_overseer
-	});
-}
 
-#[test]
-fn active_leave_unblocks_advertisements() {
-	let mut test_state = TestState::default();
-	test_state.group_rotation_info.group_rotation_frequency = 100;
+		response_channel
+			.send(Ok((
+				request_v2::CollationFetchingResponse::Collation(
+					candidate_a.clone(),
+					PoV { block_data: BlockData(vec![2]) },
+				)
+				.encode(),
+				ProtocolName::from(""),
+			)))
+			.expect("Sending response should succeed");
 
-	test_harness(ReputationAggregator::new(|_| true), |test_harness| async move {
-		let TestHarness { mut virtual_overseer, .. } = test_harness;
+		assert_persisted_validation_data(
+			&mut virtual_overseer,
+			CollationVersion::V2,
+			head_c,
+			test_state.chain_ids[0],
+			Some(HeadData(vec![0]).hash()),
+			Some(PersistedValidationData::<Hash, BlockNumber> {
+				parent_head: HeadData(vec![0]),
+				relay_parent_number: 5,
+				max_pov_size: 1024,
+				relay_parent_storage_root: Default::default(),
+			}),
+		)
+		.await;
 
-		let head_b = Hash::from_low_u64_be(128);
-		let head_b_num: u32 = 0;
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::CandidateBacking(CandidateBackingMessage::Second(
+				relay_parent,
+				candidate_receipt,
+				received_pvd,
+				incoming_pov,
+			)) => {
+				assert_eq!(head_c, relay_parent);
+				assert_eq!(test_state.chain_ids[0], candidate_receipt.descriptor.para_id);
+				assert_eq!(PoV { block_data: BlockData(vec![2]) }, incoming_pov);
+				assert_eq!(PersistedValidationData::<Hash, BlockNumber> {
+					parent_head: HeadData(vec![0]),
+					relay_parent_number: 5,
+					max_pov_size: 1024,
+					relay_parent_storage_root: Default::default(),
+				}, received_pvd);
+				candidate_receipt
+			}
+		);
 
-		update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await;
+		// If candidate A is valid, proceed with seconding B.
+		if valid_parent {
+			send_seconded_statement(
+				&mut virtual_overseer,
+				keystore.clone(),
+				&CommittedCandidateReceipt {
+					descriptor: candidate_a.descriptor,
+					commitments: candidate_a_commitments,
+				},
+			)
+			.await;
 
-		let peers: Vec<CollatorPair> = (0..3).map(|_| CollatorPair::generate().0).collect();
-		let peer_ids: Vec<PeerId> = (0..3).map(|_| PeerId::random()).collect();
-		let candidates: Vec<CandidateHash> =
-			(0u8..3).map(|i| CandidateHash(Hash::repeat_byte(i))).collect();
+			assert_collation_seconded(&mut virtual_overseer, head_c, peer_a, CollationVersion::V2)
+				.await;
 
-		for (peer, peer_id) in peers.iter().zip(&peer_ids) {
-			connect_and_declare_collator(
+			// Now that candidate A has been seconded, candidate B can be seconded as well.
+
+			assert_persisted_validation_data(
 				&mut virtual_overseer,
-				*peer_id,
-				peer.clone(),
-				test_state.chain_ids[0],
 				CollationVersion::V2,
+				head_c,
+				test_state.chain_ids[0],
+				Some(HeadData(vec![1]).hash()),
+				Some(PersistedValidationData::<Hash, BlockNumber> {
+					parent_head: HeadData(vec![1]),
+					relay_parent_number: 5,
+					max_pov_size: 1024,
+					relay_parent_storage_root: Default::default(),
+				}),
 			)
 			.await;
-		}
 
-		let parent_head_data_hash = Hash::zero();
-		for (peer, candidate) in peer_ids.iter().zip(&candidates).take(2) {
-			advertise_collation(
+			assert_matches!(
+				overseer_recv(&mut virtual_overseer).await,
+				AllMessages::CandidateBacking(CandidateBackingMessage::Second(
+					relay_parent,
+					candidate_receipt,
+					received_pvd,
+					incoming_pov,
+				)) => {
+					assert_eq!(head_c, relay_parent);
+					assert_eq!(test_state.chain_ids[0], candidate_receipt.descriptor.para_id);
+					assert_eq!(PoV { block_data: BlockData(vec![1]) }, incoming_pov);
+					assert_eq!(PersistedValidationData::<Hash, BlockNumber> {
+						parent_head: HeadData(vec![1]),
+						relay_parent_number: 5,
+						max_pov_size: 1024,
+						relay_parent_storage_root: Default::default(),
+					}, received_pvd);
+					candidate_receipt
+				}
+			);
+
+			send_seconded_statement(
 				&mut virtual_overseer,
-				*peer,
-				head_b,
-				Some((*candidate, parent_head_data_hash)),
+				keystore.clone(),
+				&CommittedCandidateReceipt {
+					descriptor: candidate_b.descriptor,
+					commitments: candidate_b_commitments,
+				},
+			)
+			.await;
+
+			assert_collation_seconded(&mut virtual_overseer, head_c, peer_a, CollationVersion::V2)
+				.await;
+		} else {
+			// If candidate A is invalid, B won't be seconded.
+			overseer_send(
+				&mut virtual_overseer,
+				CollatorProtocolMessage::Invalid(head_c, candidate_a),
 			)
 			.await;
 
 			assert_matches!(
 				overseer_recv(&mut virtual_overseer).await,
-				AllMessages::CandidateBacking(
-					CandidateBackingMessage::CanSecond(request, tx),
+				AllMessages::NetworkBridgeTx(
+					NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(peer, rep)),
 				) => {
-					assert_eq!(request.candidate_hash, *candidate);
-					assert_eq!(request.candidate_para_id, test_state.chain_ids[0]);
-					assert_eq!(request.parent_head_data_hash, parent_head_data_hash);
-					// Send false.
-					tx.send(false).expect("receiving side should be alive");
+					assert_eq!(peer, peer_a);
+					assert_eq!(rep.value, COST_REPORT_BAD.cost_or_benefit());
 				}
 			);
 		}
 
-		let head_c = Hash::from_low_u64_be(127);
-		let head_c_num: u32 = 1;
-
-		let next_overseer_message =
-			update_view(&mut virtual_overseer, &test_state, vec![(head_c, head_c_num)], 1)
-				.await
-				.expect("should've sent request to backing");
-
-		// Unblock first request.
-		assert_matches!(
-			next_overseer_message,
-			AllMessages::CandidateBacking(
-				CandidateBackingMessage::CanSecond(request, tx),
-			) => {
-					assert_eq!(request.candidate_hash, candidates[0]);
-					assert_eq!(request.candidate_para_id, test_state.chain_ids[0]);
-					assert_eq!(request.parent_head_data_hash, parent_head_data_hash);
-					tx.send(true).expect("receiving side should be alive");
-			}
-		);
-
-		assert_fetch_collation_request(
-			&mut virtual_overseer,
-			head_b,
-			test_state.chain_ids[0],
-			Some(candidates[0]),
-		)
-		.await;
-
-		assert_matches!(
-			overseer_recv(&mut virtual_overseer).await,
-			AllMessages::CandidateBacking(
-				CandidateBackingMessage::CanSecond(request, tx),
-			) => {
-					assert_eq!(request.candidate_hash, candidates[1]);
-					assert_eq!(request.candidate_para_id, test_state.chain_ids[0]);
-					assert_eq!(request.parent_head_data_hash, parent_head_data_hash);
-					tx.send(false).expect("receiving side should be alive");
-			}
-		);
-
-		// Collation request was discarded.
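+		// In both cases, no further messages are expected.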
 		test_helpers::Yield::new().await;
 		assert_matches!(virtual_overseer.recv().now_or_never(), None);
 
-		advertise_collation(
-			&mut virtual_overseer,
-			peer_ids[2],
-			head_c,
-			Some((candidates[2], parent_head_data_hash)),
-		)
-		.await;
-
-		assert_matches!(
-			overseer_recv(&mut virtual_overseer).await,
-			AllMessages::CandidateBacking(
-				CandidateBackingMessage::CanSecond(request, tx),
-			) => {
-				assert_eq!(request.candidate_hash, candidates[2]);
-				tx.send(false).expect("receiving side should be alive");
-			}
-		);
-
-		let head_d = Hash::from_low_u64_be(126);
-		let head_d_num: u32 = 2;
-
-		let next_overseer_message =
-			update_view(&mut virtual_overseer, &test_state, vec![(head_d, head_d_num)], 1)
-				.await
-				.expect("should've sent request to backing");
-
-		// Reject 2, accept 3.
-		assert_matches!(
-			next_overseer_message,
-			AllMessages::CandidateBacking(
-				CandidateBackingMessage::CanSecond(request, tx),
-			) => {
-				assert_eq!(request.candidate_hash, candidates[1]);
-				tx.send(false).expect("receiving side should be alive");
-			}
-		);
-		assert_matches!(
-			overseer_recv(&mut virtual_overseer).await,
-			AllMessages::CandidateBacking(
-				CandidateBackingMessage::CanSecond(request, tx),
-			) => {
-				assert_eq!(request.candidate_hash, candidates[2]);
-				tx.send(true).expect("receiving side should be alive");
-			}
-		);
-		assert_fetch_collation_request(
-			&mut virtual_overseer,
-			head_c,
-			test_state.chain_ids[0],
-			Some(candidates[2]),
-		)
-		.await;
-
 		virtual_overseer
 	});
 }
diff --git a/polkadot/node/network/statement-distribution/src/v2/candidates.rs b/polkadot/node/network/statement-distribution/src/v2/candidates.rs
index ad56ad4a2365b94e6ced0e43ce0747d99fb6a80d..a4f2455c28401f536d449268591e2f9746a9db81 100644
--- a/polkadot/node/network/statement-distribution/src/v2/candidates.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/candidates.rs
@@ -243,12 +243,12 @@ impl Candidates {
 	/// Whether statements from a candidate are importable.
 	///
 	/// This is only true when the candidate is known, confirmed,
-	/// and is importable in a fragment tree.
+	/// and is importable in a fragment chain.
 	pub fn is_importable(&self, candidate_hash: &CandidateHash) -> bool {
 		self.get_confirmed(candidate_hash).map_or(false, |c| c.is_importable(None))
 	}
 
-	/// Note that a candidate is importable in a fragment tree indicated by the given
+	/// Note that a candidate is importable in a fragment chain indicated by the given
 	/// leaf hash.
 	pub fn note_importable_under(&mut self, candidate: &HypotheticalCandidate, leaf_hash: Hash) {
 		match candidate {
diff --git a/polkadot/node/network/statement-distribution/src/v2/grid.rs b/polkadot/node/network/statement-distribution/src/v2/grid.rs
index 24d846c840e00c49446a0525b3768353f56ae524..b6e4163090c4d5fe00a22c61b50d99b74e69c8bd 100644
--- a/polkadot/node/network/statement-distribution/src/v2/grid.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/grid.rs
@@ -46,10 +46,8 @@
 //! - Request/response for the candidate + votes.
 //!   - Ignore if they are inconsistent with the manifest.
 //!   - A malicious backing group is capable of producing an unbounded number of backed candidates.
-//!     - We request the candidate only if the candidate has a hypothetical depth in any of our
-//!       fragment trees, and:
-//!     - the seconding validators have not seconded any other candidates at that depth in any of
-//!       those fragment trees
+//!     - We request the candidate only if the candidate is a hypothetical member in any of our
+//!       fragment chains.
 //! - All members of the group attempt to circulate all statements (in compact form) from the rest
 //!   of the group on candidates that have already been backed.
 //!   - They do this via the grid topology.
diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs
index 8579ac15cbc13f5186acea86f4dab492b943c4aa..961ec45bdada037885fb5ea43858e4e1522fac2c 100644
--- a/polkadot/node/network/statement-distribution/src/v2/mod.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs
@@ -37,7 +37,7 @@ use polkadot_node_primitives::{
 use polkadot_node_subsystem::{
 	messages::{
 		network_bridge_event::NewGossipTopology, CandidateBackingMessage, HypotheticalCandidate,
-		HypotheticalFrontierRequest, NetworkBridgeEvent, NetworkBridgeTxMessage,
+		HypotheticalMembershipRequest, NetworkBridgeEvent, NetworkBridgeTxMessage,
 		ProspectiveParachainsMessage,
 	},
 	overseer, ActivatedLeaf,
@@ -753,7 +753,7 @@ pub(crate) async fn handle_active_leaves_update<Context>(
 		}
 	}
 
-	new_leaf_fragment_tree_updates(ctx, state, activated.hash).await;
+	new_leaf_fragment_chain_updates(ctx, state, activated.hash).await;
 
 	Ok(())
 }
@@ -2216,7 +2216,7 @@ async fn determine_groups_per_para(
 }
 
 #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
-async fn fragment_tree_update_inner<Context>(
+async fn fragment_chain_update_inner<Context>(
 	ctx: &mut Context,
 	state: &mut State,
 	active_leaf_hash: Option<Hash>,
@@ -2230,31 +2230,34 @@ async fn fragment_tree_update_inner<Context>(
 	};
 
 	// 2. find out which are in the frontier
-	let frontier = {
+	gum::debug!(
+		target: LOG_TARGET,
+		"Calling GetHypotheticalMembership from statement distribution"
+	);
+	let candidate_memberships = {
 		let (tx, rx) = oneshot::channel();
-		ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalFrontier(
-			HypotheticalFrontierRequest {
+		ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalMembership(
+			HypotheticalMembershipRequest {
 				candidates: hypotheticals,
-				fragment_tree_relay_parent: active_leaf_hash,
-				backed_in_path_only: false,
+				fragment_chain_relay_parent: active_leaf_hash,
 			},
 			tx,
 		))
 		.await;
 
 		match rx.await {
-			Ok(frontier) => frontier,
+			Ok(candidate_memberships) => candidate_memberships,
 			Err(oneshot::Canceled) => return,
 		}
 	};
 	// 3. note that they are importable under a given leaf hash.
-	for (hypo, membership) in frontier {
-		// skip parablocks outside of the frontier
+	for (hypo, membership) in candidate_memberships {
+		// skip parablocks which aren't potential candidates
 		if membership.is_empty() {
 			continue
 		}
 
-		for (leaf_hash, _) in membership {
+		for leaf_hash in membership {
 			state.candidates.note_importable_under(&hypo, leaf_hash);
 		}
 
@@ -2298,31 +2301,31 @@ async fn fragment_tree_update_inner<Context>(
 }
 
 #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
-async fn new_leaf_fragment_tree_updates<Context>(
+async fn new_leaf_fragment_chain_updates<Context>(
 	ctx: &mut Context,
 	state: &mut State,
 	leaf_hash: Hash,
 ) {
-	fragment_tree_update_inner(ctx, state, Some(leaf_hash), None, None).await
+	fragment_chain_update_inner(ctx, state, Some(leaf_hash), None, None).await
 }
 
 #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
-async fn prospective_backed_notification_fragment_tree_updates<Context>(
+async fn prospective_backed_notification_fragment_chain_updates<Context>(
 	ctx: &mut Context,
 	state: &mut State,
 	para_id: ParaId,
 	para_head: Hash,
 ) {
-	fragment_tree_update_inner(ctx, state, None, Some((para_head, para_id)), None).await
+	fragment_chain_update_inner(ctx, state, None, Some((para_head, para_id)), None).await
 }
 
 #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
-async fn new_confirmed_candidate_fragment_tree_updates<Context>(
+async fn new_confirmed_candidate_fragment_chain_updates<Context>(
 	ctx: &mut Context,
 	state: &mut State,
 	candidate: HypotheticalCandidate,
 ) {
-	fragment_tree_update_inner(ctx, state, None, None, Some(vec![candidate])).await
+	fragment_chain_update_inner(ctx, state, None, None, Some(vec![candidate])).await
 }
 
 struct ManifestImportSuccess<'a> {
@@ -2865,7 +2868,7 @@ pub(crate) async fn handle_backed_candidate_message<Context>(
 	.await;
 
 	// Search for children of the backed candidate to request.
-	prospective_backed_notification_fragment_tree_updates(
+	prospective_backed_notification_fragment_chain_updates(
 		ctx,
 		state,
 		confirmed.para_id(),
@@ -2956,7 +2959,8 @@ async fn apply_post_confirmation<Context>(
 		post_confirmation.hypothetical.relay_parent(),
 	)
 	.await;
-	new_confirmed_candidate_fragment_tree_updates(ctx, state, post_confirmation.hypothetical).await;
+	new_confirmed_candidate_fragment_chain_updates(ctx, state, post_confirmation.hypothetical)
+		.await;
 }
 
 /// Dispatch pending requests for candidate data & statements.
@@ -3185,8 +3189,8 @@ pub(crate) async fn handle_response<Context>(
 
 	let confirmed = state.candidates.get_confirmed(&candidate_hash).expect("just confirmed; qed");
 
-	// Although the candidate is confirmed, it isn't yet on the
-	// hypothetical frontier of the fragment tree. Later, when it is,
+	// Although the candidate is confirmed, it isn't yet a
+	// hypothetical member of the fragment chain. Later, when it is,
 	// we will import statements.
 	if !confirmed.is_importable(None) {
 		return
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs
index 4fb033e08ce3af0724d5a8770bdb0abf942e38a5..fe51f953e244a560d7fe2e0bf414279fd0b8bf89 100644
--- a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs
@@ -111,8 +111,8 @@ fn share_seconded_circulated_to_cluster() {
 		);
 
 		// sharing a `Seconded` message confirms a candidate, which leads to new
-		// fragment tree updates.
-		answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+		// fragment chain updates.
+		answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 
 		overseer
 	});
@@ -509,7 +509,7 @@ fn seconded_statement_leads_to_request() {
 				if p == peer_a && r == BENEFIT_VALID_RESPONSE.into() => { }
 		);
 
-		answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+		answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 
 		overseer
 	});
@@ -583,7 +583,7 @@ fn cluster_statements_shared_seconded_first() {
 			.await;
 
 		// result of new confirmed candidate.
-		answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+		answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 
 		overseer
 			.send(FromOrchestra::Communication {
@@ -717,8 +717,8 @@ fn cluster_accounts_for_implicit_view() {
 		);
 
 		// sharing a `Seconded` message confirms a candidate, which leads to new
-		// fragment tree updates.
-		answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+		// fragment chain updates.
+		answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 
 		// activate new leaf, which has relay-parent in implicit view.
 		let next_relay_parent = Hash::repeat_byte(2);
@@ -855,7 +855,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() {
 			);
 		}
 
-		answer_expected_hypothetical_depth_request(
+		answer_expected_hypothetical_membership_request(
 			&mut overseer,
 			vec![(
 				HypotheticalCandidate::Complete {
@@ -863,7 +863,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() {
 					receipt: Arc::new(candidate.clone()),
 					persisted_validation_data: pvd.clone(),
 				},
-				vec![(relay_parent, vec![0])],
+				vec![relay_parent],
 			)],
 		)
 		.await;
@@ -978,7 +978,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() {
 			);
 		}
 
-		answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+		answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 
 		let next_relay_parent = Hash::repeat_byte(2);
 		let mut next_test_leaf = state.make_dummy_leaf(next_relay_parent);
@@ -996,7 +996,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() {
 					receipt: Arc::new(candidate.clone()),
 					persisted_validation_data: pvd.clone(),
 				},
-				vec![(relay_parent, vec![0])],
+				vec![relay_parent],
 			)],
 		)
 		.await;
@@ -1113,7 +1113,7 @@ fn ensure_seconding_limit_is_respected() {
 				AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a]
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Candidate 2.
@@ -1139,7 +1139,7 @@ fn ensure_seconding_limit_is_respected() {
 				AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a]
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Send first statement from peer A.
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs
index 9d00a92e742bb6a304004ed74eb59ea8b6c5440c..d2bf031368c14a13f5da44dd29ba28376109f9bf 100644
--- a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs
@@ -129,7 +129,7 @@ fn backed_candidate_leads_to_advertisement() {
 				AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a]
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Send enough statements to make candidate backable, make sure announcements are sent.
@@ -224,7 +224,7 @@ fn backed_candidate_leads_to_advertisement() {
 				}
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		overseer
@@ -384,7 +384,7 @@ fn received_advertisement_before_confirmation_leads_to_request() {
 					if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() => { }
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		overseer
@@ -515,7 +515,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() {
 			assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_STATEMENT);
 			assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_RESPONSE);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Receive Backed message.
@@ -546,7 +546,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() {
 				}
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Receive a manifest about the same candidate from peer D.
@@ -720,7 +720,7 @@ fn received_acknowledgements_for_locally_confirmed() {
 				AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a]
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Receive an unexpected acknowledgement from peer D.
@@ -785,7 +785,7 @@ fn received_acknowledgements_for_locally_confirmed() {
 				}
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Receive an unexpected acknowledgement from peer D.
@@ -918,7 +918,7 @@ fn received_acknowledgements_for_externally_confirmed() {
 			assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_STATEMENT);
 			assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_RESPONSE);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		let ack = BackedCandidateAcknowledgement {
@@ -1101,7 +1101,7 @@ fn received_advertisement_after_confirmation_before_backing() {
 					if p == peer_c && r == BENEFIT_VALID_RESPONSE.into()
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Receive advertisement from peer D (after confirmation but before backing).
@@ -1272,9 +1272,12 @@ fn additional_statements_are_shared_after_manifest_exchange() {
 			receipt: Arc::new(candidate.clone()),
 			persisted_validation_data: pvd.clone(),
 		};
-		let membership = vec![(relay_parent, vec![0])];
-		answer_expected_hypothetical_depth_request(&mut overseer, vec![(hypothetical, membership)])
-			.await;
+		let membership = vec![relay_parent];
+		answer_expected_hypothetical_membership_request(
+			&mut overseer,
+			vec![(hypothetical, membership)],
+		)
+		.await;
 
 		// Statements are sent to the Backing subsystem.
 		{
@@ -1338,7 +1341,7 @@ fn additional_statements_are_shared_after_manifest_exchange() {
 				}
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Receive a manifest about the same candidate from peer D. Contains different statements.
@@ -1507,7 +1510,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() {
 				AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a]
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Send enough statements to make candidate backable, make sure announcements are sent.
@@ -1574,7 +1577,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() {
 			})
 			.await;
 
-		answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+		answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 
 		// Relay parent enters view of peer C.
 		{
@@ -1721,7 +1724,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() {
 				AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a]
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Send enough statements to make candidate backable, make sure announcements are sent.
@@ -1816,7 +1819,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() {
 				}
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Peer leaves view.
@@ -1982,9 +1985,12 @@ fn inner_grid_statements_imported_to_backing(groups_for_first_para: usize) {
 			receipt: Arc::new(candidate.clone()),
 			persisted_validation_data: pvd.clone(),
 		};
-		let membership = vec![(relay_parent, vec![0])];
-		answer_expected_hypothetical_depth_request(&mut overseer, vec![(hypothetical, membership)])
-			.await;
+		let membership = vec![relay_parent];
+		answer_expected_hypothetical_membership_request(
+			&mut overseer,
+			vec![(hypothetical, membership)],
+		)
+		.await;
 
 		// Receive messages from Backing subsystem.
 		{
@@ -2616,7 +2622,7 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() {
 					if p == peer_c && r == BENEFIT_VALID_RESPONSE.into()
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Receive conflicting advertisement from peer C after confirmation.
@@ -2763,7 +2769,7 @@ fn inactive_local_participates_in_grid() {
 			AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r)))
 				if p == peer_a && r == BENEFIT_VALID_RESPONSE.into() => { }
 		);
-		answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+		answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 
 		overseer
 	});
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
index 3d987d3fc433fbf62c1b05675da864b82f019536..d32e2323ba346b3001677b7951d4257e6a18752e 100644
--- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
@@ -26,8 +26,8 @@ use polkadot_node_network_protocol::{
 };
 use polkadot_node_primitives::Statement;
 use polkadot_node_subsystem::messages::{
-	network_bridge_event::NewGossipTopology, AllMessages, ChainApiMessage, FragmentTreeMembership,
-	HypotheticalCandidate, NetworkBridgeEvent, ProspectiveParachainsMessage, ReportPeerMessage,
+	network_bridge_event::NewGossipTopology, AllMessages, ChainApiMessage, HypotheticalCandidate,
+	HypotheticalMembership, NetworkBridgeEvent, ProspectiveParachainsMessage, ReportPeerMessage,
 	RuntimeApiMessage, RuntimeApiRequest,
 };
 use polkadot_node_subsystem_test_helpers as test_helpers;
@@ -539,7 +539,7 @@ async fn activate_leaf(
 	leaf: &TestLeaf,
 	test_state: &TestState,
 	is_new_session: bool,
-	hypothetical_frontier: Vec<(HypotheticalCandidate, FragmentTreeMembership)>,
+	hypothetical_memberships: Vec<(HypotheticalCandidate, HypotheticalMembership)>,
 ) {
 	let activated = new_leaf(leaf.hash, leaf.number);
 
@@ -554,7 +554,7 @@ async fn activate_leaf(
 		leaf,
 		test_state,
 		is_new_session,
-		hypothetical_frontier,
+		hypothetical_memberships,
 	)
 	.await;
 }
@@ -564,7 +564,7 @@ async fn handle_leaf_activation(
 	leaf: &TestLeaf,
 	test_state: &TestState,
 	is_new_session: bool,
-	hypothetical_frontier: Vec<(HypotheticalCandidate, FragmentTreeMembership)>,
+	hypothetical_memberships: Vec<(HypotheticalCandidate, HypotheticalMembership)>,
 ) {
 	let TestLeaf {
 		number,
@@ -674,18 +674,17 @@ async fn handle_leaf_activation(
 				tx.send(Ok((validator_groups, group_rotation_info))).unwrap();
 			},
 			AllMessages::ProspectiveParachains(
-				ProspectiveParachainsMessage::GetHypotheticalFrontier(req, tx),
+				ProspectiveParachainsMessage::GetHypotheticalMembership(req, tx),
 			) => {
-				assert_eq!(req.fragment_tree_relay_parent, Some(*hash));
-				assert!(!req.backed_in_path_only);
-				for (i, (candidate, _)) in hypothetical_frontier.iter().enumerate() {
+				assert_eq!(req.fragment_chain_relay_parent, Some(*hash));
+				for (i, (candidate, _)) in hypothetical_memberships.iter().enumerate() {
 					assert!(
 						req.candidates.iter().any(|c| &c == &candidate),
 						"did not receive request for hypothetical candidate {}",
 						i,
 					);
 				}
-				tx.send(hypothetical_frontier).unwrap();
+				tx.send(hypothetical_memberships).unwrap();
 				// this is the last expected runtime api call
 				break
 			},
@@ -727,17 +726,16 @@ async fn handle_sent_request(
 	);
 }
 
-async fn answer_expected_hypothetical_depth_request(
+async fn answer_expected_hypothetical_membership_request(
 	virtual_overseer: &mut VirtualOverseer,
-	responses: Vec<(HypotheticalCandidate, FragmentTreeMembership)>,
+	responses: Vec<(HypotheticalCandidate, HypotheticalMembership)>,
 ) {
 	assert_matches!(
 		virtual_overseer.recv().await,
 		AllMessages::ProspectiveParachains(
-			ProspectiveParachainsMessage::GetHypotheticalFrontier(req, tx)
+			ProspectiveParachainsMessage::GetHypotheticalMembership(req, tx)
 		) => {
-			assert_eq!(req.fragment_tree_relay_parent, None);
-			assert!(!req.backed_in_path_only);
+			assert_eq!(req.fragment_chain_relay_parent, None);
 			for (i, (candidate, _)) in responses.iter().enumerate() {
 				assert!(
 					req.candidates.iter().any(|c| &c == &candidate),
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs
index c9de42d2c4681b0b8cd13677964a85474b183201..38d7a10b86527c153f4a369beb8bbe86da05d582 100644
--- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs
@@ -169,7 +169,7 @@ fn cluster_peer_allowed_to_send_incomplete_statements() {
 			);
 		}
 
-		answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+		answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 
 		overseer
 	});
@@ -339,7 +339,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() {
 					if p == peer_c && r == BENEFIT_VALID_RESPONSE.into()
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Peer C advertises candidate 2.
@@ -411,7 +411,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() {
 					if p == peer_c && r == BENEFIT_VALID_RESPONSE.into()
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Peer C sends an announcement for candidate 3. Should hit seconding limit for validator 1.
@@ -634,7 +634,7 @@ fn peer_reported_for_not_enough_statements() {
 					if p == peer_c && r == BENEFIT_VALID_RESPONSE.into()
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		overseer
@@ -789,7 +789,7 @@ fn peer_reported_for_duplicate_statements() {
 			);
 		}
 
-		answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+		answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 
 		overseer
 	});
@@ -919,7 +919,7 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() {
 					if p == peer_a && r == BENEFIT_VALID_RESPONSE.into() => { }
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		overseer
@@ -1049,7 +1049,7 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() {
 					if p == peer_a && r == BENEFIT_VALID_RESPONSE.into() => { }
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		overseer
@@ -1215,7 +1215,7 @@ fn disabled_validators_added_to_unwanted_mask() {
 					assert_eq!(statement, seconded_b);
 				}
 			);
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		overseer
@@ -1372,7 +1372,7 @@ fn when_validator_disabled_after_sending_the_request() {
 					assert_eq!(statement, seconded_b);
 				}
 			);
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		overseer
@@ -1475,7 +1475,7 @@ fn no_response_for_grid_request_not_meeting_quorum() {
 				AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a]
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Send enough statements to make candidate backable, make sure announcements are sent.
@@ -1572,7 +1572,7 @@ fn no_response_for_grid_request_not_meeting_quorum() {
 				}
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		let mask = StatementFilter {
@@ -1720,7 +1720,7 @@ fn disabling_works_from_the_latest_state_not_relay_parent() {
 					if p == peer_disabled && r == BENEFIT_VALID_RESPONSE.into() => { }
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		activate_leaf(&mut overseer, &leaf_2, &state, false, vec![]).await;
@@ -1862,7 +1862,7 @@ fn local_node_sanity_checks_incoming_requests() {
 				}
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Should drop requests from unknown peers.
@@ -2036,7 +2036,7 @@ fn local_node_checks_that_peer_can_request_before_responding() {
 			}
 		);
 
-		answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+		answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 
 		// Local node should respond to requests from peers in the same group
 		// which appear to not have already seen the candidate
@@ -2248,7 +2248,7 @@ fn local_node_respects_statement_mask() {
 				AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a]
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Send enough statements to make candidate backable, make sure announcements are sent.
@@ -2347,7 +2347,7 @@ fn local_node_respects_statement_mask() {
 				}
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// `1` indicates statements NOT to request.
@@ -2600,7 +2600,7 @@ fn should_delay_before_retrying_dropped_requests() {
 					if p == peer_c && r == BENEFIT_VALID_RESPONSE.into()
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		// Sleep for the given amount of time. This should reset the delay for the first candidate.
@@ -2691,7 +2691,7 @@ fn should_delay_before_retrying_dropped_requests() {
 					if p == peer_c && r == BENEFIT_VALID_RESPONSE.into()
 			);
 
-			answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await;
+			answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await;
 		}
 
 		overseer
diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs
index e75d80395c4ba0371b58ce1383db2a1f364eaad8..2a54b3aed301e7dcdcdaf87fe49775b43954a102 100644
--- a/polkadot/node/subsystem-types/src/messages.rs
+++ b/polkadot/node/subsystem-types/src/messages.rs
@@ -62,7 +62,7 @@ pub mod network_bridge_event;
 pub use network_bridge_event::NetworkBridgeEvent;
 
 /// A request to the candidate backing subsystem to check whether
-/// there exists vacant membership in some fragment tree.
+/// we can second this candidate.
 #[derive(Debug, Copy, Clone)]
 pub struct CanSecondRequest {
 	/// Para id of the candidate.
@@ -90,10 +90,12 @@ pub enum CandidateBackingMessage {
 		oneshot::Sender<HashMap<ParaId, Vec<BackedCandidate>>>,
 	),
 	/// Request the subsystem to check whether it's allowed to second given candidate.
-	/// The rule is to only fetch collations that are either built on top of the root
-	/// of some fragment tree or have a parent node which represents backed candidate.
+	/// The rule is to only fetch collations that either can be directly chained to some
+	/// FragmentChain in the view, or for which there is at least one FragmentChain where this
+	/// candidate is a potentially unconnected candidate (we predict that it may become
+	/// connected to a FragmentChain in the future).
 	///
-	/// Always responses with `false` if async backing is disabled for candidate's relay
+	/// Always responds with `false` if async backing is disabled for candidate's relay
 	/// parent.
 	CanSecond(CanSecondRequest, oneshot::Sender<bool>),
 	/// Note that the Candidate Backing subsystem should second the given candidate in the context
@@ -244,13 +246,6 @@ pub enum CollatorProtocolMessage {
 	///
 	/// The hash is the relay parent.
 	Seconded(Hash, SignedFullStatement),
-	/// The candidate received enough validity votes from the backing group.
-	Backed {
-		/// Candidate's para id.
-		para_id: ParaId,
-		/// Hash of the para head generated by candidate.
-		para_head: Hash,
-	},
 }
 
 impl Default for CollatorProtocolMessage {
@@ -1023,9 +1018,9 @@ pub enum GossipSupportMessage {
 	NetworkBridgeUpdate(NetworkBridgeEvent<net_protocol::GossipSupportNetworkMessage>),
 }
 
-/// Request introduction of a candidate into the prospective parachains subsystem.
+/// Request introduction of a seconded candidate into the prospective parachains subsystem.
 #[derive(Debug, PartialEq, Eq, Clone)]
-pub struct IntroduceCandidateRequest {
+pub struct IntroduceSecondedCandidateRequest {
 	/// The para-id of the candidate.
 	pub candidate_para: ParaId,
 	/// The candidate receipt itself.
@@ -1034,7 +1029,7 @@ pub struct IntroduceCandidateRequest {
 	pub persisted_validation_data: PersistedValidationData,
 }
 
-/// A hypothetical candidate to be evaluated for frontier membership
+/// A hypothetical candidate to be evaluated for potential/actual membership
 /// in the prospective parachains subsystem.
 ///
 /// Hypothetical candidates are either complete or incomplete.
@@ -1103,21 +1098,27 @@ impl HypotheticalCandidate {
 				candidate_relay_parent,
 		}
 	}
+
+	/// Get the output head data hash, if the candidate is complete.
+	pub fn output_head_data_hash(&self) -> Option<Hash> {
+		match *self {
+			HypotheticalCandidate::Complete { ref receipt, .. } =>
+				Some(receipt.descriptor.para_head),
+			HypotheticalCandidate::Incomplete { .. } => None,
+		}
+	}
 }
 
 /// Request specifying which candidates are either already included
-/// or might be included in the hypothetical frontier of fragment trees
-/// under a given active leaf.
+/// or might become included in a fragment chain under a given active leaf (or any active leaf
+/// if `fragment_chain_relay_parent` is `None`).
 #[derive(Debug, PartialEq, Eq, Clone)]
-pub struct HypotheticalFrontierRequest {
+pub struct HypotheticalMembershipRequest {
 	/// Candidates, in arbitrary order, which should be checked for
-	/// possible membership in fragment trees.
+	/// hypothetical/actual membership in fragment chains.
 	pub candidates: Vec<HypotheticalCandidate>,
-	/// Either a specific fragment tree to check, otherwise all.
-	pub fragment_tree_relay_parent: Option<Hash>,
-	/// Only return membership if all candidates in the path from the
-	/// root are backed.
-	pub backed_in_path_only: bool,
+	/// Either a specific fragment chain to check, otherwise all.
+	pub fragment_chain_relay_parent: Option<Hash>,
 }
 
 /// A request for the persisted validation data stored in the prospective
@@ -1156,9 +1157,9 @@ impl ParentHeadData {
 	}
 }
 
-/// Indicates the relay-parents whose fragment tree a candidate
-/// is present in and the depths of that tree the candidate is present in.
-pub type FragmentTreeMembership = Vec<(Hash, Vec<usize>)>;
+/// Indicates the relay-parents whose fragment chain a candidate
+/// is present in or can be added to (right now or in the future).
+pub type HypotheticalMembership = Vec<Hash>;
 
 /// A collection of ancestor candidates of a parachain.
 pub type Ancestors = HashSet<CandidateHash>;
@@ -1166,15 +1167,11 @@ pub type Ancestors = HashSet<CandidateHash>;
 /// Messages sent to the Prospective Parachains subsystem.
 #[derive(Debug)]
 pub enum ProspectiveParachainsMessage {
-	/// Inform the Prospective Parachains Subsystem of a new candidate.
+	/// Inform the Prospective Parachains Subsystem of a new seconded candidate.
 	///
-	/// The response sender accepts the candidate membership, which is the existing
-	/// membership of the candidate if it was already known.
-	IntroduceCandidate(IntroduceCandidateRequest, oneshot::Sender<FragmentTreeMembership>),
-	/// Inform the Prospective Parachains Subsystem that a previously introduced candidate
-	/// has been seconded. This requires that the candidate was successfully introduced in
-	/// the past.
-	CandidateSeconded(ParaId, CandidateHash),
+	/// The response sender returns `false` if the candidate was rejected by prospective
+	/// parachains, and `true` otherwise (if it was accepted or already present).
+	IntroduceSecondedCandidate(IntroduceSecondedCandidateRequest, oneshot::Sender<bool>),
 	/// Inform the Prospective Parachains Subsystem that a previously introduced candidate
 	/// has been backed. This requires that the candidate was successfully introduced in
 	/// the past.
@@ -1193,23 +1190,29 @@ pub enum ProspectiveParachainsMessage {
 		Ancestors,
 		oneshot::Sender<Vec<(CandidateHash, Hash)>>,
 	),
-	/// Get the hypothetical frontier membership of candidates with the given properties
-	/// under the specified active leaves' fragment trees.
+	/// Get the hypothetical or actual membership of candidates with the given properties
+	/// under the specified active leaves' fragment chains.
+	///
+	/// For each candidate, we return a vector of leaves where the candidate is present or could
+	/// be added. "Could be added" means either that the candidate can be added to the chain
+	/// right now or that it may become addable in the future (we may not have its ancestors yet).
+	/// Note that even if we predict a candidate could be added in the future, we may later find
+	/// out that it is invalid.
+	/// If an active leaf is not in the vector, it means that there's no
+	/// chance this candidate will become valid under that leaf in the future.
 	///
-	/// For any candidate which is already known, this returns the depths the candidate
-	/// occupies.
-	GetHypotheticalFrontier(
-		HypotheticalFrontierRequest,
-		oneshot::Sender<Vec<(HypotheticalCandidate, FragmentTreeMembership)>>,
+	/// If `fragment_chain_relay_parent` in the request is `Some`, the returned vector can only
+	/// contain this relay parent (or be empty).
+	GetHypotheticalMembership(
+		HypotheticalMembershipRequest,
+		oneshot::Sender<Vec<(HypotheticalCandidate, HypotheticalMembership)>>,
 	),
-	/// Get the membership of the candidate in all fragment trees.
-	GetTreeMembership(ParaId, CandidateHash, oneshot::Sender<FragmentTreeMembership>),
-	/// Get the minimum accepted relay-parent number for each para in the fragment tree
+	/// Get the minimum accepted relay-parent number for each para in the fragment chain
 	/// for the given relay-chain block hash.
 	///
 	/// That is, if the block hash is known and is an active leaf, this returns the
 	/// minimum relay-parent block number in the same branch of the relay chain which
-	/// is accepted in the fragment tree for each para-id.
+	/// is accepted in the fragment chain for each para-id.
 	///
 	/// If the block hash is not an active leaf, this will return an empty vector.
 	///
@@ -1219,8 +1222,10 @@ pub enum ProspectiveParachainsMessage {
 	/// Para-IDs are returned in no particular order.
 	GetMinimumRelayParents(Hash, oneshot::Sender<Vec<(ParaId, BlockNumber)>>),
 	/// Get the validation data of some prospective candidate. The candidate doesn't need
-	/// to be part of any fragment tree, but this only succeeds if the parent head-data and
-	/// relay-parent are part of some fragment tree.
+	/// to be part of any fragment chain, but this only succeeds if the parent head-data and
+	/// relay-parent are part of the `CandidateStorage` (meaning that they belong to a candidate
+	/// which is part of some fragment chain or which prospective-parachains predicts will become
+	/// part of some fragment chain).
 	GetProspectiveValidationData(
 		ProspectiveValidationDataRequest,
 		oneshot::Sender<Option<PersistedValidationData>>,
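
To illustrate how a caller consumes the reworked prospective-parachains API, here is a hedged sketch of issuing the two new messages. The `overseer::SubsystemSender` bound and `send_message` call stand in for the usual subsystem plumbing and are assumptions, not part of this diff:

```rust
use futures::channel::oneshot;

// Sketch: introduce a seconded candidate and, if prospective-parachains accepts it,
// query which active leaves it is (or may become) a member of.
async fn introduce_and_check_membership<Sender>(
	sender: &mut Sender,
	request: IntroduceSecondedCandidateRequest,
	candidate: HypotheticalCandidate,
) -> Vec<(HypotheticalCandidate, HypotheticalMembership)>
where
	Sender: overseer::SubsystemSender<ProspectiveParachainsMessage>,
{
	let (tx, rx) = oneshot::channel();
	sender
		.send_message(ProspectiveParachainsMessage::IntroduceSecondedCandidate(request, tx))
		.await;
	if !rx.await.unwrap_or(false) {
		// Rejected by prospective parachains; nothing to query.
		return Vec::new()
	}

	let (tx, rx) = oneshot::channel();
	let membership_request = HypotheticalMembershipRequest {
		candidates: vec![candidate],
		// `None` means: check against the fragment chains of all active leaves.
		fragment_chain_relay_parent: None,
	};
	sender
		.send_message(ProspectiveParachainsMessage::GetHypotheticalMembership(
			membership_request,
			tx,
		))
		.await;
	rx.await.unwrap_or_default()
}
```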
diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
index d38d838fedefac643253528ab99d0035eeae55da..b5aef325c8b437ec43c2872f130651de65c28a52 100644
--- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
+++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
@@ -39,8 +39,8 @@
 ///
 /// # Usage
 ///
-/// It's expected that the users of this module will be building up trees of
-/// [`Fragment`]s and consistently pruning and adding to the tree.
+/// It's expected that the users of this module will be building up chains of
+/// [`Fragment`]s and consistently pruning and adding to the chains.
 ///
 /// ## Operating Constraints
 ///
@@ -54,60 +54,65 @@
 /// make an intelligent prediction about what might be accepted in the future based on
 /// prior fragments that also exist off-chain.
 ///
-/// ## Fragment Trees
+/// ## Fragment Chains
+///
+/// For simplicity and practicality, we expect that collators of the same parachain are
+/// cooperating and don't create parachain forks or cycles on the same relay chain active leaf.
+/// Therefore, higher-level code should maintain one fragment chain for each active leaf (not a
+/// fragment tree). If parachains do create forks, their performance with regard to async
+/// backing and elastic scaling will suffer, because different validators will have different
+/// predictions of the future.
 ///
 /// As the relay-chain grows, some predictions come true and others come false.
 /// And new predictions get made. These three changes correspond distinctly to the
-/// 3 primary operations on fragment trees.
-///
-/// A fragment tree is a mental model for thinking about a forking series of predictions
-/// about a single parachain. There may be one or more fragment trees per parachain.
-///
-/// In expectation, most parachains will have a plausibly-unique authorship method which means
-/// that they should really be much closer to fragment-chains, maybe with an occasional fork.
+/// 3 primary operations on fragment chains.
 ///
-/// Avoiding fragment-tree blowup is beyond the scope of this module.
+/// Avoiding fragment-chain blowup is beyond the scope of this module. Higher-level code must
+/// ensure proper spam protection.
 ///
-/// ### Pruning Fragment Trees
+/// ### Pruning Fragment Chains
 ///
 /// When the relay-chain advances, we want to compare the new constraints of that relay-parent
-/// to the roots of the fragment trees we have. There are 3 cases:
+/// to the root of the fragment chain we have. There are 3 cases:
 ///
 /// 1. The root fragment is still valid under the new constraints. In this case, we do nothing.
-///    This is the "prediction still uncertain" case.
+///    This is the "prediction still uncertain" case. (Corresponds to some candidates still
+///    being pending availability).
 ///
-/// 2. The root fragment is invalid under the new constraints because it has been subsumed by
-///    the relay-chain. In this case, we can discard the root and split & re-root the fragment
-///    tree under its descendants and compare to the new constraints again. This is the
-///    "prediction came true" case.
+/// 2. The root fragment (potentially along with a number of descendants) is invalid under the
+///    new constraints because it has been included by the relay-chain. In this case, we can
+///    discard the included chain and split & re-root the chain under its descendants and
+///    compare to the new constraints again. This is the "prediction came true" case.
 ///
-/// 3. The root fragment is invalid under the new constraints because a competing parachain
-///    block has been included or it would never be accepted for some other reason. In this
-///    case we can discard the entire fragment tree. This is the "prediction came false" case.
+/// 3. The root fragment becomes invalid under the new constraints for any reason (if for
+///    example the parachain produced a fork and the block producer picked a different
+///    candidate to back). In this case we can discard the entire fragment chain. This is the
+///    "prediction came false" case.
 ///
 /// This is all a bit of a simplification because it assumes that the relay-chain advances
-/// without forks and is finalized instantly. In practice, the set of fragment-trees needs to
+/// without forks and is finalized instantly. In practice, the set of fragment-chains needs to
 /// be observable from the perspective of a few different possible forks of the relay-chain and
 /// not pruned too eagerly.
 ///
 /// Note that the fragments themselves don't need to change and the only thing we care about
 /// is whether the predictions they represent are still valid.
 ///
-/// ### Extending Fragment Trees
+/// ### Extending Fragment Chains
 ///
 /// As predictions fade into the past, new ones should be stacked on top.
 ///
 /// Every new relay-chain block is an opportunity to make a new prediction about the future.
-/// Higher-level logic should select the leaves of the fragment-trees to build upon or whether
-/// to create a new fragment-tree.
+/// Higher-level logic should decide whether to build upon an existing chain or whether
+/// to create a new fragment-chain.
 ///
 /// ### Code Upgrades
 ///
 /// Code upgrades are the main place where this emulation fails. The on-chain PVF upgrade
 /// scheduling logic is very path-dependent and intricate so we just assume that code upgrades
-/// can't be initiated and applied within a single fragment-tree. Fragment-trees aren't deep,
-/// in practice and code upgrades are fairly rare. So what's likely to happen around code
-/// upgrades is that the entire fragment-tree has to get discarded at some point.
+/// can't be initiated and applied within a single fragment-chain. Fragment-chains aren't deep
+/// in practice (bounded by a linear function of the number of cores assigned to a
+/// parachain), and code upgrades are fairly rare. So what's likely to happen around code
+/// upgrades is that the entire fragment-chain has to get discarded at some point.
 ///
 /// That means a few blocks of execution time lost, which is not a big deal for code upgrades
 /// in practice at most once every few weeks.
@@ -116,10 +121,7 @@ use polkadot_primitives::{
 	CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, PersistedValidationData,
 	UpgradeRestriction, ValidationCodeHash,
 };
-use std::{
-	borrow::{Borrow, Cow},
-	collections::HashMap,
-};
+use std::{collections::HashMap, sync::Arc};
 
 /// Constraints on inbound HRMP channels.
 #[derive(Debug, Clone, PartialEq)]
@@ -524,9 +526,9 @@ impl ConstraintModifications {
 /// here. But the erasure-root is not. This means that prospective candidates
 /// are not correlated to any session in particular.
 #[derive(Debug, Clone, PartialEq)]
-pub struct ProspectiveCandidate<'a> {
+pub struct ProspectiveCandidate {
 	/// The commitments to the output of the execution.
-	pub commitments: Cow<'a, CandidateCommitments>,
+	pub commitments: CandidateCommitments,
 	/// The collator that created the candidate.
 	pub collator: CollatorId,
 	/// The signature of the collator on the payload.
@@ -539,32 +541,6 @@ pub struct ProspectiveCandidate<'a> {
 	pub validation_code_hash: ValidationCodeHash,
 }
 
-impl<'a> ProspectiveCandidate<'a> {
-	fn into_owned(self) -> ProspectiveCandidate<'static> {
-		ProspectiveCandidate { commitments: Cow::Owned(self.commitments.into_owned()), ..self }
-	}
-
-	/// Partially clone the prospective candidate, but borrow the
-	/// parts which are potentially heavy.
-	pub fn partial_clone(&self) -> ProspectiveCandidate {
-		ProspectiveCandidate {
-			commitments: Cow::Borrowed(self.commitments.borrow()),
-			collator: self.collator.clone(),
-			collator_signature: self.collator_signature.clone(),
-			persisted_validation_data: self.persisted_validation_data.clone(),
-			pov_hash: self.pov_hash,
-			validation_code_hash: self.validation_code_hash,
-		}
-	}
-}
-
-#[cfg(test)]
-impl ProspectiveCandidate<'static> {
-	fn commitments_mut(&mut self) -> &mut CandidateCommitments {
-		self.commitments.to_mut()
-	}
-}
-
 /// Kinds of errors with the validity of a fragment.
 #[derive(Debug, Clone, PartialEq)]
 pub enum FragmentValidityError {
@@ -618,19 +594,19 @@ pub enum FragmentValidityError {
 /// This is a type which guarantees that the candidate is valid under the
 /// operating constraints.
 #[derive(Debug, Clone, PartialEq)]
-pub struct Fragment<'a> {
+pub struct Fragment {
 	/// The new relay-parent.
 	relay_parent: RelayChainBlockInfo,
 	/// The constraints this fragment is operating under.
 	operating_constraints: Constraints,
 	/// The core information about the prospective candidate.
-	candidate: ProspectiveCandidate<'a>,
+	candidate: Arc<ProspectiveCandidate>,
 	/// Modifications to the constraints based on the outputs of
 	/// the candidate.
 	modifications: ConstraintModifications,
 }
 
-impl<'a> Fragment<'a> {
+impl Fragment {
 	/// Create a new fragment.
 	///
 	/// This fails if the fragment isn't in line with the operating
@@ -642,10 +618,29 @@ impl<'a> Fragment<'a> {
 	pub fn new(
 		relay_parent: RelayChainBlockInfo,
 		operating_constraints: Constraints,
-		candidate: ProspectiveCandidate<'a>,
+		candidate: Arc<ProspectiveCandidate>,
 	) -> Result<Self, FragmentValidityError> {
+		let modifications = Self::check_against_constraints(
+			&relay_parent,
+			&operating_constraints,
+			&candidate.commitments,
+			&candidate.validation_code_hash,
+			&candidate.persisted_validation_data,
+		)?;
+
+		Ok(Fragment { relay_parent, operating_constraints, candidate, modifications })
+	}
+
+	/// Check the candidate against the operating constraints and return the constraint
+	/// modifications made by this candidate.
+	pub fn check_against_constraints(
+		relay_parent: &RelayChainBlockInfo,
+		operating_constraints: &Constraints,
+		commitments: &CandidateCommitments,
+		validation_code_hash: &ValidationCodeHash,
+		persisted_validation_data: &PersistedValidationData,
+	) -> Result<ConstraintModifications, FragmentValidityError> {
 		let modifications = {
-			let commitments = &candidate.commitments;
 			ConstraintModifications {
 				required_parent: Some(commitments.head_data.clone()),
 				hrmp_watermark: Some({
@@ -689,11 +684,13 @@ impl<'a> Fragment<'a> {
 		validate_against_constraints(
 			&operating_constraints,
 			&relay_parent,
-			&candidate,
+			commitments,
+			persisted_validation_data,
+			validation_code_hash,
 			&modifications,
 		)?;
 
-		Ok(Fragment { relay_parent, operating_constraints, candidate, modifications })
+		Ok(modifications)
 	}
 
 	/// Access the relay parent information.
@@ -707,7 +704,7 @@ impl<'a> Fragment<'a> {
 	}
 
 	/// Access the underlying prospective candidate.
-	pub fn candidate(&self) -> &ProspectiveCandidate<'a> {
+	pub fn candidate(&self) -> &ProspectiveCandidate {
 		&self.candidate
 	}
 
@@ -715,31 +712,14 @@ impl<'a> Fragment<'a> {
 	pub fn constraint_modifications(&self) -> &ConstraintModifications {
 		&self.modifications
 	}
-
-	/// Convert the fragment into an owned variant.
-	pub fn into_owned(self) -> Fragment<'static> {
-		Fragment { candidate: self.candidate.into_owned(), ..self }
-	}
-
-	/// Validate this fragment against some set of constraints
-	/// instead of the operating constraints.
-	pub fn validate_against_constraints(
-		&self,
-		constraints: &Constraints,
-	) -> Result<(), FragmentValidityError> {
-		validate_against_constraints(
-			constraints,
-			&self.relay_parent,
-			&self.candidate,
-			&self.modifications,
-		)
-	}
 }
 
 fn validate_against_constraints(
 	constraints: &Constraints,
 	relay_parent: &RelayChainBlockInfo,
-	candidate: &ProspectiveCandidate,
+	commitments: &CandidateCommitments,
+	persisted_validation_data: &PersistedValidationData,
+	validation_code_hash: &ValidationCodeHash,
 	modifications: &ConstraintModifications,
 ) -> Result<(), FragmentValidityError> {
 	let expected_pvd = PersistedValidationData {
@@ -749,17 +729,17 @@ fn validate_against_constraints(
 		max_pov_size: constraints.max_pov_size as u32,
 	};
 
-	if expected_pvd != candidate.persisted_validation_data {
+	if expected_pvd != *persisted_validation_data {
 		return Err(FragmentValidityError::PersistedValidationDataMismatch(
 			expected_pvd,
-			candidate.persisted_validation_data.clone(),
+			persisted_validation_data.clone(),
 		))
 	}
 
-	if constraints.validation_code_hash != candidate.validation_code_hash {
+	if constraints.validation_code_hash != *validation_code_hash {
 		return Err(FragmentValidityError::ValidationCodeMismatch(
 			constraints.validation_code_hash,
-			candidate.validation_code_hash,
+			*validation_code_hash,
 		))
 	}
 
@@ -770,7 +750,7 @@ fn validate_against_constraints(
 		))
 	}
 
-	if candidate.commitments.new_validation_code.is_some() {
+	if commitments.new_validation_code.is_some() {
 		match constraints.upgrade_restriction {
 			None => {},
 			Some(UpgradeRestriction::Present) =>
@@ -778,11 +758,8 @@ fn validate_against_constraints(
 		}
 	}
 
-	let announced_code_size = candidate
-		.commitments
-		.new_validation_code
-		.as_ref()
-		.map_or(0, |code| code.0.len());
+	let announced_code_size =
+		commitments.new_validation_code.as_ref().map_or(0, |code| code.0.len());
 
 	if announced_code_size > constraints.max_code_size {
 		return Err(FragmentValidityError::CodeSizeTooLarge(
@@ -801,17 +778,17 @@ fn validate_against_constraints(
 		}
 	}
 
-	if candidate.commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate {
+	if commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate {
 		return Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow {
 			messages_allowed: constraints.max_hrmp_num_per_candidate,
-			messages_submitted: candidate.commitments.horizontal_messages.len(),
+			messages_submitted: commitments.horizontal_messages.len(),
 		})
 	}
 
-	if candidate.commitments.upward_messages.len() > constraints.max_ump_num_per_candidate {
+	if commitments.upward_messages.len() > constraints.max_ump_num_per_candidate {
 		return Err(FragmentValidityError::UmpMessagesPerCandidateOverflow {
 			messages_allowed: constraints.max_ump_num_per_candidate,
-			messages_submitted: candidate.commitments.upward_messages.len(),
+			messages_submitted: commitments.upward_messages.len(),
 		})
 	}
 
@@ -1184,21 +1161,21 @@ mod tests {
 	fn make_candidate(
 		constraints: &Constraints,
 		relay_parent: &RelayChainBlockInfo,
-	) -> ProspectiveCandidate<'static> {
+	) -> ProspectiveCandidate {
 		let collator_pair = CollatorPair::generate().0;
 		let collator = collator_pair.public();
 
 		let sig = collator_pair.sign(b"blabla".as_slice());
 
 		ProspectiveCandidate {
-			commitments: Cow::Owned(CandidateCommitments {
+			commitments: CandidateCommitments {
 				upward_messages: Default::default(),
 				horizontal_messages: Default::default(),
 				new_validation_code: None,
 				head_data: HeadData::from(vec![1, 2, 3, 4, 5]),
 				processed_downward_messages: 0,
 				hrmp_watermark: relay_parent.number,
-			}),
+			},
 			collator,
 			collator_signature: sig,
 			persisted_validation_data: PersistedValidationData {
@@ -1229,7 +1206,7 @@ mod tests {
 		candidate.validation_code_hash = got_code;
 
 		assert_eq!(
-			Fragment::new(relay_parent, constraints, candidate),
+			Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())),
 			Err(FragmentValidityError::ValidationCodeMismatch(expected_code, got_code,)),
 		)
 	}
@@ -1261,7 +1238,7 @@ mod tests {
 		let got_pvd = candidate.persisted_validation_data.clone();
 
 		assert_eq!(
-			Fragment::new(relay_parent_b, constraints, candidate),
+			Fragment::new(relay_parent_b, constraints, Arc::new(candidate.clone())),
 			Err(FragmentValidityError::PersistedValidationDataMismatch(expected_pvd, got_pvd,)),
 		);
 	}
@@ -1278,10 +1255,10 @@ mod tests {
 		let mut candidate = make_candidate(&constraints, &relay_parent);
 
 		let max_code_size = constraints.max_code_size;
-		candidate.commitments_mut().new_validation_code = Some(vec![0; max_code_size + 1].into());
+		candidate.commitments.new_validation_code = Some(vec![0; max_code_size + 1].into());
 
 		assert_eq!(
-			Fragment::new(relay_parent, constraints, candidate),
+			Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())),
 			Err(FragmentValidityError::CodeSizeTooLarge(max_code_size, max_code_size + 1,)),
 		);
 	}
@@ -1298,7 +1275,7 @@ mod tests {
 		let candidate = make_candidate(&constraints, &relay_parent);
 
 		assert_eq!(
-			Fragment::new(relay_parent, constraints, candidate),
+			Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())),
 			Err(FragmentValidityError::RelayParentTooOld(5, 3,)),
 		);
 	}
@@ -1317,7 +1294,7 @@ mod tests {
 		let max_hrmp = constraints.max_hrmp_num_per_candidate;
 
 		candidate
-			.commitments_mut()
+			.commitments
 			.horizontal_messages
 			.try_extend((0..max_hrmp + 1).map(|i| OutboundHrmpMessage {
 				recipient: ParaId::from(i as u32),
@@ -1326,7 +1303,7 @@ mod tests {
 			.unwrap();
 
 		assert_eq!(
-			Fragment::new(relay_parent, constraints, candidate),
+			Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())),
 			Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow {
 				messages_allowed: max_hrmp,
 				messages_submitted: max_hrmp + 1,
@@ -1346,22 +1323,36 @@ mod tests {
 		let mut candidate = make_candidate(&constraints, &relay_parent);
 
 		// Empty dmp queue is ok.
-		assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok());
+		assert!(Fragment::new(
+			relay_parent.clone(),
+			constraints.clone(),
+			Arc::new(candidate.clone())
+		)
+		.is_ok());
 		// Unprocessed message that was sent later is ok.
 		constraints.dmp_remaining_messages = vec![relay_parent.number + 1];
-		assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok());
+		assert!(Fragment::new(
+			relay_parent.clone(),
+			constraints.clone(),
+			Arc::new(candidate.clone())
+		)
+		.is_ok());
 
 		for block_number in 0..=relay_parent.number {
 			constraints.dmp_remaining_messages = vec![block_number];
 
 			assert_eq!(
-				Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()),
+				Fragment::new(
+					relay_parent.clone(),
+					constraints.clone(),
+					Arc::new(candidate.clone())
+				),
 				Err(FragmentValidityError::DmpAdvancementRule),
 			);
 		}
 
-		candidate.commitments.to_mut().processed_downward_messages = 1;
-		assert!(Fragment::new(relay_parent, constraints, candidate).is_ok());
+		candidate.commitments.processed_downward_messages = 1;
+		assert!(Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())).is_ok());
 	}
 
 	#[test]
@@ -1379,13 +1370,12 @@ mod tests {
 
 		candidate
 			.commitments
-			.to_mut()
 			.upward_messages
 			.try_extend((0..max_ump + 1).map(|i| vec![i as u8]))
 			.unwrap();
 
 		assert_eq!(
-			Fragment::new(relay_parent, constraints, candidate),
+			Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())),
 			Err(FragmentValidityError::UmpMessagesPerCandidateOverflow {
 				messages_allowed: max_ump,
 				messages_submitted: max_ump + 1,
@@ -1405,10 +1395,10 @@ mod tests {
 		let mut candidate = make_candidate(&constraints, &relay_parent);
 
 		constraints.upgrade_restriction = Some(UpgradeRestriction::Present);
-		candidate.commitments_mut().new_validation_code = Some(ValidationCode(vec![1, 2, 3]));
+		candidate.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3]));
 
 		assert_eq!(
-			Fragment::new(relay_parent, constraints, candidate),
+			Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())),
 			Err(FragmentValidityError::CodeUpgradeRestricted),
 		);
 	}
@@ -1424,23 +1414,23 @@ mod tests {
 		let constraints = make_constraints();
 		let mut candidate = make_candidate(&constraints, &relay_parent);
 
-		candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![
+		candidate.commitments.horizontal_messages = HorizontalMessages::truncate_from(vec![
 			OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![1, 2, 3] },
 			OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] },
 		]);
 
 		assert_eq!(
-			Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()),
+			Fragment::new(relay_parent.clone(), constraints.clone(), Arc::new(candidate.clone())),
 			Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)),
 		);
 
-		candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![
+		candidate.commitments.horizontal_messages = HorizontalMessages::truncate_from(vec![
 			OutboundHrmpMessage { recipient: ParaId::from(1 as u32), data: vec![1, 2, 3] },
 			OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] },
 		]);
 
 		assert_eq!(
-			Fragment::new(relay_parent, constraints, candidate),
+			Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())),
 			Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)),
 		);
 	}
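
To make the new `Fragment` API concrete, a small usage sketch (assuming `relay_parent`, `constraints` and a complete `candidate` are built as in the tests above; the assertion relies on `ConstraintModifications` deriving `PartialEq`):

```rust
use std::sync::Arc;

fn demo(
	relay_parent: RelayChainBlockInfo,
	constraints: Constraints,
	candidate: ProspectiveCandidate,
) -> Result<(), FragmentValidityError> {
	// The standalone check validates a candidate and returns its constraint
	// modifications without constructing a `Fragment`.
	let modifications = Fragment::check_against_constraints(
		&relay_parent,
		&constraints,
		&candidate.commitments,
		&candidate.validation_code_hash,
		&candidate.persisted_validation_data,
	)?;

	// `Fragment::new` performs the same validation internally; the candidate is now
	// shared behind an `Arc` instead of borrowed via `Cow`.
	let fragment = Fragment::new(relay_parent, constraints, Arc::new(candidate))?;
	assert_eq!(fragment.constraint_modifications(), &modifications);
	Ok(())
}
```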
diff --git a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md
index 8f00ff084941cc260dea6a9e76c0ff30d3770caf..701f6c87caff0341c36e4b2799d2444c015c411c 100644
--- a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md
+++ b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md
@@ -98,15 +98,11 @@ prospective validation data. This is unlikely to change.
     hashes.
   - Sent by the Provisioner when requesting backable candidates, when
     selecting candidates for a given relay-parent.
-- `ProspectiveParachainsMessage::GetHypotheticalFrontier`
+- `ProspectiveParachainsMessage::GetHypotheticalMembership`
   - Gets the hypothetical frontier membership of candidates with the
     given properties under the specified active leaves' fragment trees.
   - Sent by the Backing Subsystem when sanity-checking whether a candidate can
     be seconded based on its hypothetical frontiers.
-- `ProspectiveParachainsMessage::GetTreeMembership`
-  - Gets the membership of the candidate in all fragment trees.
-  - Sent by the Backing Subsystem when it needs to update the candidates
-    seconded at various depths under new active leaves.
 - `ProspectiveParachainsMessage::GetMinimumRelayParents`
   - Gets the minimum accepted relay-parent number for each para in the
     fragment tree for the given relay-chain block hash.
diff --git a/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md b/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md
index e6e597c531787f46ced0a6f9e38e05817f2323d7..e5eb9bd7642c1108c45e73134a00ee22b2f6475c 100644
--- a/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md
+++ b/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md
@@ -194,7 +194,7 @@ request). This doesn't fully avoid race conditions, but tries to minimize them.
   - Reports a peer (either good or bad).
 - `CandidateBackingMessage::Statement`
   - Note a validator's statement about a particular candidate.
-- `ProspectiveParachainsMessage::GetHypotheticalFrontier`
+- `ProspectiveParachainsMessage::GetHypotheticalMembership`
   - Gets the hypothetical frontier membership of candidates under active leaves' fragment trees.
 - `NetworkBridgeTxMessage::SendRequests`
   - Sends requests, initiating the request/response protocol.
diff --git a/prdoc/pr_4035.prdoc b/prdoc/pr_4035.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..0617a6a261898b743d74dc8fa55224012080391b
--- /dev/null
+++ b/prdoc/pr_4035.prdoc
@@ -0,0 +1,24 @@
+title: "Prospective parachains rework"
+
+doc:
+  - audience: Node Dev
+    description: |
+      Changes prospective-parachains from dealing with trees of unincluded candidates to maintaining only candidate chains
+      and a number of unconnected candidates (for which we don't yet know the parent candidate but which otherwise seem potentially viable).
+      This is needed for elastic scaling, in order to have full throughput even if a candidate is validated by a backing group before the parent candidate
+      is fetched from the other backing group.
+      Also simplifies the subsystem by no longer allowing parachain cycles.
+
+crates:
+  - name: polkadot-node-core-prospective-parachains
+    bump: major
+  - name: polkadot-node-core-backing
+    bump: minor
+  - name: polkadot-collator-protocol
+    bump: minor
+  - name: polkadot-statement-distribution
+    bump: minor
+  - name: polkadot-node-subsystem-types
+    bump: major
+  - name: polkadot-node-subsystem-util
+    bump: major