From 74ec1ee226ace087748f38dfeffc869cd5534ac8 Mon Sep 17 00:00:00 2001
From: Alin Dima <alin@parity.io>
Date: Tue, 5 Nov 2024 14:05:31 +0200
Subject: [PATCH] refactor and harden check_core_index (#6217)

Resolves https://github.com/paritytech/polkadot-sdk/issues/6179
---
 .../consensus/aura/src/collators/lookahead.rs |   8 +-
 .../slot_based/block_builder_task.rs          |   6 +-
 cumulus/pallets/parachain-system/src/lib.rs   |   4 +-
 cumulus/primitives/core/src/lib.rs            |   4 -
 .../node/collation-generation/src/error.rs    |   4 +-
 polkadot/node/collation-generation/src/lib.rs |   2 +-
 .../src/inclusion_emulator/mod.rs             |  15 +-
 polkadot/primitives/src/vstaging/mod.rs       | 206 ++++++++++--------
 .../runtime/parachains/src/inclusion/mod.rs   |  38 +---
 .../parachains/src/paras_inherent/tests.rs    |   7 +-
 prdoc/pr_6217.prdoc                           |  24 ++
 11 files changed, 162 insertions(+), 156 deletions(-)
 create mode 100644 prdoc/pr_6217.prdoc

diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs
index 8ac43fbd116..2dbcf5eb58e 100644
--- a/cumulus/client/consensus/aura/src/collators/lookahead.rs
+++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs
@@ -36,17 +36,15 @@ use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterfa
 use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker};
 use cumulus_client_consensus_proposer::ProposerInterface;
 use cumulus_primitives_aura::AuraUnincludedSegmentApi;
-use cumulus_primitives_core::{
-	ClaimQueueOffset, CollectCollationInfo, PersistedValidationData, DEFAULT_CLAIM_QUEUE_OFFSET,
-};
+use cumulus_primitives_core::{ClaimQueueOffset, CollectCollationInfo, PersistedValidationData};
 use cumulus_relay_chain_interface::RelayChainInterface;
 
 use polkadot_node_primitives::{PoV, SubmitCollationParams};
 use polkadot_node_subsystem::messages::CollationGenerationMessage;
 use polkadot_overseer::Handle as OverseerHandle;
 use polkadot_primitives::{
-	BlockNumber as RBlockNumber, CollatorPair, Hash as RHash, HeadData, Id as ParaId,
-	OccupiedCoreAssumption,
+	vstaging::DEFAULT_CLAIM_QUEUE_OFFSET, BlockNumber as RBlockNumber, CollatorPair, Hash as RHash,
+	HeadData, Id as ParaId, OccupiedCoreAssumption,
 };
 
 use futures::prelude::*;
diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs
index e75b52aeebd..42515123070 100644
--- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs
+++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs
@@ -20,13 +20,11 @@ use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterfa
 use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker};
 use cumulus_client_consensus_proposer::ProposerInterface;
 use cumulus_primitives_aura::AuraUnincludedSegmentApi;
-use cumulus_primitives_core::{
-	GetCoreSelectorApi, PersistedValidationData, DEFAULT_CLAIM_QUEUE_OFFSET,
-};
+use cumulus_primitives_core::{GetCoreSelectorApi, PersistedValidationData};
 use cumulus_relay_chain_interface::RelayChainInterface;
 
 use polkadot_primitives::{
-	vstaging::{ClaimQueueOffset, CoreSelector},
+	vstaging::{ClaimQueueOffset, CoreSelector, DEFAULT_CLAIM_QUEUE_OFFSET},
 	BlockId, CoreIndex, Hash as RelayHash, Header as RelayHeader, Id as ParaId,
 	OccupiedCoreAssumption,
 };
diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs
index 98989a852b8..39fc8321a07 100644
--- a/cumulus/pallets/parachain-system/src/lib.rs
+++ b/cumulus/pallets/parachain-system/src/lib.rs
@@ -35,12 +35,12 @@ use core::{cmp, marker::PhantomData};
 use cumulus_primitives_core::{
 	relay_chain::{
 		self,
-		vstaging::{ClaimQueueOffset, CoreSelector},
+		vstaging::{ClaimQueueOffset, CoreSelector, DEFAULT_CLAIM_QUEUE_OFFSET},
 	},
 	AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, GetChannelInfo,
 	InboundDownwardMessage, InboundHrmpMessage, ListChannelInfos, MessageSendError,
 	OutboundHrmpMessage, ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender,
-	XcmpMessageHandler, XcmpMessageSource, DEFAULT_CLAIM_QUEUE_OFFSET,
+	XcmpMessageHandler, XcmpMessageSource,
 };
 use cumulus_primitives_parachain_inherent::{MessageQueueChain, ParachainInherentData};
 use frame_support::{
diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs
index dfb574ef330..f88e663db19 100644
--- a/cumulus/primitives/core/src/lib.rs
+++ b/cumulus/primitives/core/src/lib.rs
@@ -333,10 +333,6 @@ pub mod rpsr_digest {
 	}
 }
 
-/// The default claim queue offset to be used if it's not configured/accessible in the parachain
-/// runtime
-pub const DEFAULT_CLAIM_QUEUE_OFFSET: u8 = 0;
-
 /// Information about a collation.
 ///
 /// This was used in version 1 of the [`CollectCollationInfo`] runtime api.
diff --git a/polkadot/node/collation-generation/src/error.rs b/polkadot/node/collation-generation/src/error.rs
index 68902f58579..2599026080d 100644
--- a/polkadot/node/collation-generation/src/error.rs
+++ b/polkadot/node/collation-generation/src/error.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
-use polkadot_primitives::vstaging::CandidateReceiptError;
+use polkadot_primitives::vstaging::CommittedCandidateReceiptError;
 use thiserror::Error;
 
 #[derive(Debug, Error)]
@@ -34,7 +34,7 @@ pub enum Error {
 	#[error("Collation submitted before initialization")]
 	SubmittedBeforeInit,
 	#[error("V2 core index check failed: {0}")]
-	CandidateReceiptCheck(CandidateReceiptError),
+	CandidateReceiptCheck(CommittedCandidateReceiptError),
 	#[error("PoV size {0} exceeded maximum size of {1}")]
 	POVSizeExceeded(usize, usize),
 }
diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs
index 9e975acf10b..b371017a828 100644
--- a/polkadot/node/collation-generation/src/lib.rs
+++ b/polkadot/node/collation-generation/src/lib.rs
@@ -554,7 +554,7 @@ async fn construct_and_distribute_receipt(
 
 		ccr.to_plain()
 	} else {
-		if commitments.selected_core().is_some() {
+		if commitments.core_selector().map_err(Error::CandidateReceiptCheck)?.is_some() {
 			gum::warn!(
 				target: LOG_TARGET,
 				?pov_hash,
diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
index 20ca62d41f5..48d3f27b1fa 100644
--- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
+++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
@@ -82,9 +82,9 @@
 /// in practice at most once every few weeks.
 use polkadot_node_subsystem::messages::HypotheticalCandidate;
 use polkadot_primitives::{
-	async_backing::Constraints as PrimitiveConstraints, BlockNumber, CandidateCommitments,
-	CandidateHash, Hash, HeadData, Id as ParaId, PersistedValidationData, UpgradeRestriction,
-	ValidationCodeHash,
+	async_backing::Constraints as PrimitiveConstraints, vstaging::skip_ump_signals, BlockNumber,
+	CandidateCommitments, CandidateHash, Hash, HeadData, Id as ParaId, PersistedValidationData,
+	UpgradeRestriction, ValidationCodeHash,
 };
 use std::{collections::HashMap, sync::Arc};
 
@@ -601,13 +601,8 @@ impl Fragment {
 		persisted_validation_data: &PersistedValidationData,
 	) -> Result<ConstraintModifications, FragmentValidityError> {
 		// Filter UMP signals and the separator.
-		let upward_messages = if let Some(separator_index) =
-			commitments.upward_messages.iter().position(|message| message.is_empty())
-		{
-			commitments.upward_messages.split_at(separator_index).0
-		} else {
-			&commitments.upward_messages
-		};
+		let upward_messages =
+			skip_ump_signals(commitments.upward_messages.iter()).collect::<Vec<_>>();
 
 		let ump_messages_sent = upward_messages.len();
 		let ump_bytes_sent = upward_messages.iter().map(|msg| msg.len()).sum();
diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs
index ca9c3e1beba..271f78efe09 100644
--- a/polkadot/primitives/src/vstaging/mod.rs
+++ b/polkadot/primitives/src/vstaging/mod.rs
@@ -39,6 +39,10 @@ use sp_staking::SessionIndex;
 /// Async backing primitives
 pub mod async_backing;
 
+/// The default claim queue offset to be used if it's not configured/accessible in the parachain
+/// runtime
+pub const DEFAULT_CLAIM_QUEUE_OFFSET: u8 = 0;
+
 /// A type representing the version of the candidate descriptor and internal version number.
 #[derive(PartialEq, Eq, Encode, Decode, Clone, TypeInfo, RuntimeDebug, Copy)]
 #[cfg_attr(feature = "std", derive(Hash))]
@@ -430,49 +434,45 @@ pub enum UMPSignal {
 /// Separator between `XCM` and `UMPSignal`.
 pub const UMP_SEPARATOR: Vec<u8> = vec![];
 
-impl CandidateCommitments {
-	/// Returns the core selector and claim queue offset the candidate has committed to, if any.
-	pub fn selected_core(&self) -> Option<(CoreSelector, ClaimQueueOffset)> {
-		// We need at least 2 messages for the separator and core selector
-		if self.upward_messages.len() < 2 {
-			return None
-		}
-
-		let separator_pos =
-			self.upward_messages.iter().rposition(|message| message == &UMP_SEPARATOR)?;
-
-		// Use first commitment
-		let message = self.upward_messages.get(separator_pos + 1)?;
+/// Utility function for skipping the ump signals.
+pub fn skip_ump_signals<'a>(
+	upward_messages: impl Iterator<Item = &'a Vec<u8>>,
+) -> impl Iterator<Item = &'a Vec<u8>> {
+	upward_messages.take_while(|message| *message != &UMP_SEPARATOR)
+}
 
-		match UMPSignal::decode(&mut message.as_slice()).ok()? {
-			UMPSignal::SelectCore(core_selector, cq_offset) => Some((core_selector, cq_offset)),
-		}
-	}
+impl CandidateCommitments {
+	/// Returns the core selector and claim queue offset determined by `UMPSignal::SelectCore`
+	/// commitment, if present.
+	pub fn core_selector(
+		&self,
+	) -> Result<Option<(CoreSelector, ClaimQueueOffset)>, CommittedCandidateReceiptError> {
+		let mut signals_iter =
+			self.upward_messages.iter().skip_while(|message| *message != &UMP_SEPARATOR);
+
+		if signals_iter.next().is_some() {
+			let Some(core_selector_message) = signals_iter.next() else { return Ok(None) };
+			// We should have exactly one signal beyond the separator
+			if signals_iter.next().is_some() {
+				return Err(CommittedCandidateReceiptError::TooManyUMPSignals)
+			}
 
-	/// Returns the core index determined by `UMPSignal::SelectCore` commitment
-	/// and `assigned_cores`.
-	///
-	/// Returns `None` if there is no `UMPSignal::SelectCore` commitment or
-	/// assigned cores is empty.
-	///
-	/// `assigned_cores` must be a sorted vec of all core indices assigned to a parachain.
-	pub fn committed_core_index(&self, assigned_cores: &[&CoreIndex]) -> Option<CoreIndex> {
-		if assigned_cores.is_empty() {
-			return None
+			match UMPSignal::decode(&mut core_selector_message.as_slice())
+				.map_err(|_| CommittedCandidateReceiptError::UmpSignalDecode)?
+			{
+				UMPSignal::SelectCore(core_index_selector, cq_offset) =>
+					Ok(Some((core_index_selector, cq_offset))),
+			}
+		} else {
+			Ok(None)
 		}
-
-		self.selected_core().and_then(|(core_selector, _cq_offset)| {
-			let core_index =
-				**assigned_cores.get(core_selector.0 as usize % assigned_cores.len())?;
-			Some(core_index)
-		})
 	}
 }
 
-/// CandidateReceipt construction errors.
+/// `CommittedCandidateReceipt` check errors.
 #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, RuntimeDebug)]
 #[cfg_attr(feature = "std", derive(thiserror::Error))]
-pub enum CandidateReceiptError {
+pub enum CommittedCandidateReceiptError {
 	/// The specified core index is invalid.
 	#[cfg_attr(feature = "std", error("The specified core index is invalid"))]
 	InvalidCoreIndex,
@@ -485,6 +485,9 @@ pub enum CandidateReceiptError {
 	/// The core selector or claim queue offset is invalid.
 	#[cfg_attr(feature = "std", error("The core selector or claim queue offset is invalid"))]
 	InvalidSelectedCore,
+	/// Could not decode UMP signal.
+	#[cfg_attr(feature = "std", error("Could not decode UMP signal"))]
+	UmpSignalDecode,
 	/// The parachain is not assigned to any core at specified claim queue offset.
 	#[cfg_attr(
 		feature = "std",
@@ -498,6 +501,10 @@ pub enum CandidateReceiptError {
 	/// Unknown version.
 	#[cfg_attr(feature = "std", error("Unknown internal version"))]
 	UnknownVersion(InternalVersion),
+	/// The allowed number of `UMPSignal` messages in the queue was exceeded.
+	/// Currently only one such message is allowed.
+	#[cfg_attr(feature = "std", error("Too many UMP signals"))]
+	TooManyUMPSignals,
 }
 
 macro_rules! impl_getter {
@@ -590,57 +597,63 @@ impl<H: Copy> CandidateDescriptorV2<H> {
 
 impl<H: Copy> CommittedCandidateReceiptV2<H> {
 	/// Checks if descriptor core index is equal to the committed core index.
-	/// Input `cores_per_para` is a claim queue snapshot stored as a mapping
-	/// between `ParaId` and the cores assigned per depth.
+	/// Input `cores_per_para` is a claim queue snapshot at the candidate's relay parent, stored as
+	/// a mapping between `ParaId` and the cores assigned per depth.
 	pub fn check_core_index(
 		&self,
 		cores_per_para: &TransposedClaimQueue,
-	) -> Result<(), CandidateReceiptError> {
+	) -> Result<(), CommittedCandidateReceiptError> {
 		match self.descriptor.version() {
 			// Don't check v1 descriptors.
 			CandidateDescriptorVersion::V1 => return Ok(()),
 			CandidateDescriptorVersion::V2 => {},
 			CandidateDescriptorVersion::Unknown =>
-				return Err(CandidateReceiptError::UnknownVersion(self.descriptor.version)),
+				return Err(CommittedCandidateReceiptError::UnknownVersion(self.descriptor.version)),
 		}
 
-		if cores_per_para.is_empty() {
-			return Err(CandidateReceiptError::NoAssignment)
-		}
-
-		let (offset, core_selected) =
-			if let Some((_core_selector, cq_offset)) = self.commitments.selected_core() {
-				(cq_offset.0, true)
-			} else {
-				// If no core has been selected then we use offset 0 (top of claim queue)
-				(0, false)
-			};
+		let (maybe_core_index_selector, cq_offset) = self.commitments.core_selector()?.map_or_else(
+			|| (None, ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET)),
+			|(sel, off)| (Some(sel), off),
+		);
 
-		// The cores assigned to the parachain at above computed offset.
 		let assigned_cores = cores_per_para
 			.get(&self.descriptor.para_id())
-			.ok_or(CandidateReceiptError::NoAssignment)?
-			.get(&offset)
-			.ok_or(CandidateReceiptError::NoAssignment)?
-			.into_iter()
-			.collect::<Vec<_>>();
-
-		let core_index = if core_selected {
-			self.commitments
-				.committed_core_index(assigned_cores.as_slice())
-				.ok_or(CandidateReceiptError::NoAssignment)?
-		} else {
-			// `SelectCore` commitment is mandatory for elastic scaling parachains.
-			if assigned_cores.len() > 1 {
-				return Err(CandidateReceiptError::NoCoreSelected)
-			}
+			.ok_or(CommittedCandidateReceiptError::NoAssignment)?
+			.get(&cq_offset.0)
+			.ok_or(CommittedCandidateReceiptError::NoAssignment)?;
 
-			**assigned_cores.get(0).ok_or(CandidateReceiptError::NoAssignment)?
-		};
+		if assigned_cores.is_empty() {
+			return Err(CommittedCandidateReceiptError::NoAssignment)
+		}
 
 		let descriptor_core_index = CoreIndex(self.descriptor.core_index as u32);
+
+		let core_index_selector = if let Some(core_index_selector) = maybe_core_index_selector {
+			// We have a committed core selector, we can use it.
+			core_index_selector
+		} else if assigned_cores.len() > 1 {
+			// We got more than one assigned core and no core selector. Special care is needed.
+			if !assigned_cores.contains(&descriptor_core_index) {
+				// core index in the descriptor is not assigned to the para. Error.
+				return Err(CommittedCandidateReceiptError::InvalidCoreIndex)
+			} else {
+				// the descriptor core index is indeed assigned to the para. This is the most we can
+				// check for now
+				return Ok(())
+			}
+		} else {
+			// No core selector but there's only one assigned core, use it.
+			CoreSelector(0)
+		};
+
+		let core_index = assigned_cores
+			.iter()
+			.nth(core_index_selector.0 as usize % assigned_cores.len())
+			.ok_or(CommittedCandidateReceiptError::InvalidSelectedCore)
+			.copied()?;
+
 		if core_index != descriptor_core_index {
-			return Err(CandidateReceiptError::CoreIndexMismatch)
+			return Err(CommittedCandidateReceiptError::CoreIndexMismatch)
 		}
 
 		Ok(())
@@ -1037,7 +1050,7 @@ mod tests {
 		assert_eq!(new_ccr.descriptor.version(), CandidateDescriptorVersion::Unknown);
 		assert_eq!(
 			new_ccr.check_core_index(&BTreeMap::new()),
-			Err(CandidateReceiptError::UnknownVersion(InternalVersion(100)))
+			Err(CommittedCandidateReceiptError::UnknownVersion(InternalVersion(100)))
 		)
 	}
 
@@ -1075,7 +1088,6 @@ mod tests {
 		new_ccr.descriptor.core_index = 0;
 		new_ccr.descriptor.para_id = ParaId::new(1000);
 
-		new_ccr.commitments.upward_messages.force_push(UMP_SEPARATOR);
 		new_ccr.commitments.upward_messages.force_push(UMP_SEPARATOR);
 
 		let mut cq = BTreeMap::new();
@@ -1089,7 +1101,14 @@ mod tests {
 		new_ccr.commitments.upward_messages.force_push(vec![0, 13, 200].encode());
 
 		// No `SelectCore` can be decoded.
-		assert_eq!(new_ccr.commitments.selected_core(), None);
+		assert_eq!(
+			new_ccr.commitments.core_selector(),
+			Err(CommittedCandidateReceiptError::UmpSignalDecode)
+		);
+
+		// Has two cores assigned but no core commitment. Will pass the check if the descriptor core
+		// index is indeed assigned to the para.
+		new_ccr.commitments.upward_messages.clear();
 
 		let mut cq = BTreeMap::new();
 		cq.insert(
@@ -1100,28 +1119,46 @@ mod tests {
 			CoreIndex(100),
 			vec![new_ccr.descriptor.para_id(), new_ccr.descriptor.para_id()].into(),
 		);
+		assert_eq!(new_ccr.check_core_index(&transpose_claim_queue(cq.clone())), Ok(()));
 
+		new_ccr.descriptor.set_core_index(CoreIndex(1));
 		assert_eq!(
 			new_ccr.check_core_index(&transpose_claim_queue(cq.clone())),
-			Err(CandidateReceiptError::NoCoreSelected)
+			Err(CommittedCandidateReceiptError::InvalidCoreIndex)
 		);
+		new_ccr.descriptor.set_core_index(CoreIndex(0));
 
 		new_ccr.commitments.upward_messages.clear();
 		new_ccr.commitments.upward_messages.force_push(UMP_SEPARATOR);
-
 		new_ccr
 			.commitments
 			.upward_messages
 			.force_push(UMPSignal::SelectCore(CoreSelector(0), ClaimQueueOffset(1)).encode());
 
-		// Duplicate
+		// No assignments.
+		assert_eq!(
+			new_ccr.check_core_index(&transpose_claim_queue(Default::default())),
+			Err(CommittedCandidateReceiptError::NoAssignment)
+		);
+
+		// Mismatch between descriptor index and commitment.
+		new_ccr.descriptor.set_core_index(CoreIndex(1));
+		assert_eq!(
+			new_ccr.check_core_index(&transpose_claim_queue(cq.clone())),
+			Err(CommittedCandidateReceiptError::CoreIndexMismatch)
+		);
+		new_ccr.descriptor.set_core_index(CoreIndex(0));
+
+		// Too many UMP signals.
 		new_ccr
 			.commitments
 			.upward_messages
 			.force_push(UMPSignal::SelectCore(CoreSelector(1), ClaimQueueOffset(1)).encode());
 
-		// Duplicate doesn't override first signal.
-		assert_eq!(new_ccr.check_core_index(&transpose_claim_queue(cq)), Ok(()));
+		assert_eq!(
+			new_ccr.check_core_index(&transpose_claim_queue(cq)),
+			Err(CommittedCandidateReceiptError::TooManyUMPSignals)
+		);
 	}
 
 	#[test]
@@ -1191,7 +1228,7 @@ mod tests {
 			Decode::decode(&mut encoded_ccr.as_slice()).unwrap();
 
 		assert_eq!(v1_ccr.descriptor.version(), CandidateDescriptorVersion::V1);
-		assert!(v1_ccr.commitments.selected_core().is_some());
+		assert!(v1_ccr.commitments.core_selector().unwrap().is_some());
 
 		let mut cq = BTreeMap::new();
 		cq.insert(CoreIndex(0), vec![v1_ccr.descriptor.para_id()].into());
@@ -1199,11 +1236,6 @@ mod tests {
 
 		assert!(v1_ccr.check_core_index(&transpose_claim_queue(cq)).is_ok());
 
-		assert_eq!(
-			v1_ccr.commitments.committed_core_index(&vec![&CoreIndex(10), &CoreIndex(5)]),
-			Some(CoreIndex(5)),
-		);
-
 		assert_eq!(v1_ccr.descriptor.core_index(), None);
 	}
 
@@ -1228,11 +1260,9 @@ mod tests {
 		cq.insert(CoreIndex(0), vec![new_ccr.descriptor.para_id()].into());
 		cq.insert(CoreIndex(1), vec![new_ccr.descriptor.para_id()].into());
 
-		//  Should fail because 2 cores are assigned,
-		assert_eq!(
-			new_ccr.check_core_index(&transpose_claim_queue(cq)),
-			Err(CandidateReceiptError::NoCoreSelected)
-		);
+		// Passes even if 2 cores are assigned, because elastic scaling MVP could still inject the
+		// core index in the `BackedCandidate`.
+		assert_eq!(new_ccr.check_core_index(&transpose_claim_queue(cq)), Ok(()));
 
 		// Adding collator signature should make it decode as v1.
 		old_ccr.descriptor.signature = dummy_collator_signature();
diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs
index ea3a5d3cdda..8ad9711a0f3 100644
--- a/polkadot/runtime/parachains/src/inclusion/mod.rs
+++ b/polkadot/runtime/parachains/src/inclusion/mod.rs
@@ -46,7 +46,7 @@ use pallet_message_queue::OnQueueChanged;
 use polkadot_primitives::{
 	effective_minimum_backing_votes, supermajority_threshold,
 	vstaging::{
-		BackedCandidate, CandidateDescriptorV2 as CandidateDescriptor,
+		skip_ump_signals, BackedCandidate, CandidateDescriptorV2 as CandidateDescriptor,
 		CandidateReceiptV2 as CandidateReceipt,
 		CommittedCandidateReceiptV2 as CommittedCandidateReceipt,
 	},
@@ -412,11 +412,6 @@ pub(crate) enum UmpAcceptanceCheckErr {
 	TotalSizeExceeded { total_size: u64, limit: u64 },
 	/// A para-chain cannot send UMP messages while it is offboarding.
 	IsOffboarding,
-	/// The allowed number of `UMPSignal` messages in the queue was exceeded.
-	/// Currenly only one such message is allowed.
-	TooManyUMPSignals { count: u32 },
-	/// The UMP queue contains an invalid `UMPSignal`
-	NoUmpSignal,
 }
 
 impl fmt::Debug for UmpAcceptanceCheckErr {
@@ -445,12 +440,6 @@ impl fmt::Debug for UmpAcceptanceCheckErr {
 			UmpAcceptanceCheckErr::IsOffboarding => {
 				write!(fmt, "upward message rejected because the para is off-boarding")
 			},
-			UmpAcceptanceCheckErr::TooManyUMPSignals { count } => {
-				write!(fmt, "the ump queue has too many `UMPSignal` messages ({} > 1 )", count)
-			},
-			UmpAcceptanceCheckErr::NoUmpSignal => {
-				write!(fmt, "Required UMP signal not found")
-			},
 		}
 	}
 }
@@ -925,25 +914,7 @@ impl<T: Config> Pallet<T> {
 		upward_messages: &[UpwardMessage],
 	) -> Result<(), UmpAcceptanceCheckErr> {
 		// Filter any pending UMP signals and the separator.
-		let upward_messages = if let Some(separator_index) =
-			upward_messages.iter().position(|message| message.is_empty())
-		{
-			let (upward_messages, ump_signals) = upward_messages.split_at(separator_index);
-
-			if ump_signals.len() > 2 {
-				return Err(UmpAcceptanceCheckErr::TooManyUMPSignals {
-					count: ump_signals.len() as u32,
-				})
-			}
-
-			if ump_signals.len() == 1 {
-				return Err(UmpAcceptanceCheckErr::NoUmpSignal)
-			}
-
-			upward_messages
-		} else {
-			upward_messages
-		};
+		let upward_messages = skip_ump_signals(upward_messages.iter()).collect::<Vec<_>>();
 
 		// Cannot send UMP messages while off-boarding.
 		if paras::Pallet::<T>::is_offboarding(para) {
@@ -997,10 +968,7 @@ impl<T: Config> Pallet<T> {
 	/// to deal with the messages as given. Messages that are too long will be ignored since such
 	/// candidates should have already been rejected in [`Self::check_upward_messages`].
 	pub(crate) fn receive_upward_messages(para: ParaId, upward_messages: &[Vec<u8>]) {
-		let bounded = upward_messages
-			.iter()
-			// Stop once we hit the `UMPSignal` separator.
-			.take_while(|message| !message.is_empty())
+		let bounded = skip_ump_signals(upward_messages.iter())
 			.filter_map(|d| {
 				BoundedSlice::try_from(&d[..])
 					.inspect_err(|_| {
diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs
index eef26b83368..146be0ee0aa 100644
--- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs
+++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs
@@ -1864,11 +1864,8 @@ mod enter {
 				v2_descriptor: true,
 				candidate_modifier: Some(|mut candidate: CommittedCandidateReceiptV2| {
 					if candidate.descriptor.para_id() == 1.into() {
-						// Drop the core selector to make it invalid
-						candidate
-							.commitments
-							.upward_messages
-							.truncate(candidate.commitments.upward_messages.len() - 1);
+						// Make the core selector invalid
+						candidate.commitments.upward_messages[1].truncate(0);
 					}
 					candidate
 				}),
diff --git a/prdoc/pr_6217.prdoc b/prdoc/pr_6217.prdoc
new file mode 100644
index 00000000000..2fa800b58d2
--- /dev/null
+++ b/prdoc/pr_6217.prdoc
@@ -0,0 +1,24 @@
+title: 'Unify and harden UMP signal checks in check_core_index'
+doc:
+- audience: [Runtime Dev, Node Dev]
+  description: |
+    Refactors and hardens the core index checks on the candidate commitments.
+    Also adds a utility for skipping the UMP signals.
+
+crates:
+- name: cumulus-client-consensus-aura
+  bump: patch
+- name: cumulus-pallet-parachain-system
+  bump: patch
+- name: cumulus-primitives-core
+  bump: major
+- name: polkadot-node-collation-generation
+  bump: major
+- name: polkadot-node-core-candidate-validation
+  bump: patch
+- name: polkadot-node-subsystem-util
+  bump: patch
+- name: polkadot-primitives
+  bump: major
+- name: polkadot-runtime-parachains
+  bump: patch
-- 
GitLab