diff --git a/polkadot/node/core/backing/src/error.rs b/polkadot/node/core/backing/src/error.rs
index e09d8425f78a07b3d9c8151cdc96ba2f5139e3a5..e1852be826f497ebf21f0f482b81acb6901f5a22 100644
--- a/polkadot/node/core/backing/src/error.rs
+++ b/polkadot/node/core/backing/src/error.rs
@@ -105,6 +105,9 @@ pub enum Error {
 
 	#[error("Availability store error")]
 	StoreAvailableData(#[source] StoreAvailableDataError),
+
+	#[error("Runtime API returned None for executor params")]
+	MissingExecutorParams,
 }
 
 /// Utility for eating top-level errors and logging them.
diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs
index 30121418a2fde448c6ea085a8b4797deb04795ac..250013c6541a2a3340966f5dcd32b7b2f88f67a2 100644
--- a/polkadot/node/core/backing/src/lib.rs
+++ b/polkadot/node/core/backing/src/lib.rs
@@ -93,12 +93,13 @@ use polkadot_node_subsystem::{
 		RuntimeApiMessage, RuntimeApiRequest, StatementDistributionMessage,
 		StoreAvailableDataError,
 	},
-	overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError,
+	overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, RuntimeApiError, SpawnedSubsystem,
+	SubsystemError,
 };
 use polkadot_node_subsystem_util::{
 	self as util,
 	backing_implicit_view::{FetchError as ImplicitViewFetchError, View as ImplicitView},
-	executor_params_at_relay_parent, request_from_runtime, request_session_index_for_child,
+	request_from_runtime, request_session_executor_params, request_session_index_for_child,
 	request_validator_groups, request_validators,
 	runtime::{
 		self, fetch_claim_queue, prospective_parachains_mode, request_min_backing_votes,
@@ -217,8 +218,10 @@ struct PerRelayParentState {
 	prospective_parachains_mode: ProspectiveParachainsMode,
 	/// The hash of the relay parent on top of which this job is doing its work.
 	parent: Hash,
-	/// Session index.
-	session_index: SessionIndex,
+	/// The node features.
+	node_features: NodeFeatures,
+	/// The executor parameters.
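+	/// Wrapped in an `Arc` so it can be shared cheaply with background
+	/// validation tasks.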
+	executor_params: Arc<ExecutorParams>,
 	/// The `CoreIndex` assigned to the local validator at this relay parent.
 	assigned_core: Option<CoreIndex>,
 	/// The candidates that are backed by enough validators in their group, by hash.
@@ -292,6 +295,178 @@ impl From<&ActiveLeafState> for ProspectiveParachainsMode {
 	}
 }
 
+/// A cache for storing per-session data to reduce repeated
+/// runtime API calls and avoid redundant computations.
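+/// Entries are keyed by `SessionIndex` and evicted in least-recently-used
+/// order once the capacity is exceeded.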
+struct PerSessionCache {
+	/// Cache for storing validators list, retrieved from the runtime.
+	validators_cache: LruMap<SessionIndex, Arc<Vec<ValidatorId>>>,
+	/// Cache for storing node features, retrieved from the runtime.
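+	/// A `None` returned by the runtime is cached as-is rather than re-requested.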
+	node_features_cache: LruMap<SessionIndex, Option<NodeFeatures>>,
+	/// Cache for storing executor parameters, retrieved from the runtime.
+	executor_params_cache: LruMap<SessionIndex, Arc<ExecutorParams>>,
+	/// Cache for storing the minimum backing votes threshold, retrieved from the runtime.
+	minimum_backing_votes_cache: LruMap<SessionIndex, u32>,
+	/// Cache for storing validator-to-group mappings, computed from validator groups.
+	validator_to_group_cache:
+		LruMap<SessionIndex, Arc<IndexedVec<ValidatorIndex, Option<GroupIndex>>>>,
+}
+
+impl Default for PerSessionCache {
+	/// Creates a new `PerSessionCache` with a default capacity.
+	fn default() -> Self {
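+		// A capacity of 2 should be enough to keep data for the current and the
+		// previous session around session boundaries.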
+		Self::new(2)
+	}
+}
+
+impl PerSessionCache {
+	/// Creates a new `PerSessionCache` with a given capacity.
+	fn new(capacity: u32) -> Self {
+		PerSessionCache {
+			validators_cache: LruMap::new(ByLength::new(capacity)),
+			node_features_cache: LruMap::new(ByLength::new(capacity)),
+			executor_params_cache: LruMap::new(ByLength::new(capacity)),
+			minimum_backing_votes_cache: LruMap::new(ByLength::new(capacity)),
+			validator_to_group_cache: LruMap::new(ByLength::new(capacity)),
+		}
+	}
+
+	/// Gets validators from the cache or fetches them from the runtime if not present.
+	async fn validators(
+		&mut self,
+		session_index: SessionIndex,
+		parent: Hash,
+		sender: &mut impl overseer::SubsystemSender<RuntimeApiMessage>,
+	) -> Result<Arc<Vec<ValidatorId>>, RuntimeApiError> {
+		// Try to get the validators list from the cache.
+		if let Some(validators) = self.validators_cache.get(&session_index) {
+			return Ok(Arc::clone(validators));
+		}
+
+		// Fetch the validators list from the runtime since it was not in the cache.
+		let validators: Vec<ValidatorId> =
+			request_validators(parent, sender).await.await.map_err(|err| {
+				RuntimeApiError::Execution { runtime_api_name: "Validators", source: Arc::new(err) }
+			})??;
+
+		// Wrap the validators list in an Arc to avoid a deep copy when storing it in the cache.
+		let validators = Arc::new(validators);
+
+		// Cache the fetched validators list for future use.
+		self.validators_cache.insert(session_index, Arc::clone(&validators));
+
+		Ok(validators)
+	}
+
+	/// Gets the node features from the cache or fetches them from the runtime if not present.
+	async fn node_features(
+		&mut self,
+		session_index: SessionIndex,
+		parent: Hash,
+		sender: &mut impl overseer::SubsystemSender<RuntimeApiMessage>,
+	) -> Result<Option<NodeFeatures>, Error> {
+		// Try to get the node features from the cache.
+		if let Some(node_features) = self.node_features_cache.get(&session_index) {
+			return Ok(node_features.clone());
+		}
+
+		// Fetch the node features from the runtime since it was not in the cache.
+		let node_features: Option<NodeFeatures> =
+			request_node_features(parent, session_index, sender).await?;
+
+		// Cache the fetched node features for future use.
+		self.node_features_cache.insert(session_index, node_features.clone());
+
+		Ok(node_features)
+	}
+
+	/// Gets the executor parameters from the cache or
+	/// fetches them from the runtime if not present.
+	async fn executor_params(
+		&mut self,
+		session_index: SessionIndex,
+		parent: Hash,
+		sender: &mut impl overseer::SubsystemSender<RuntimeApiMessage>,
+	) -> Result<Arc<ExecutorParams>, RuntimeApiError> {
+		// Try to get the executor parameters from the cache.
+		if let Some(executor_params) = self.executor_params_cache.get(&session_index) {
+			return Ok(Arc::clone(executor_params));
+		}
+
+		// Fetch the executor parameters from the runtime since it was not in the cache.
+		let executor_params = request_session_executor_params(parent, session_index, sender)
+			.await
+			.await
+			.map_err(|err| RuntimeApiError::Execution {
+				runtime_api_name: "SessionExecutorParams",
+				source: Arc::new(err),
+			})??
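+			// The runtime should always return executor params for a known session;
+			// treat `None` as an error.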
+			.ok_or_else(|| RuntimeApiError::Execution {
+				runtime_api_name: "SessionExecutorParams",
+				source: Arc::new(Error::MissingExecutorParams),
+			})?;
+
+		// Wrap the executor parameters in an Arc to avoid a deep copy when storing it in the cache.
+		let executor_params = Arc::new(executor_params);
+
+		// Cache the fetched executor parameters for future use.
+		self.executor_params_cache.insert(session_index, Arc::clone(&executor_params));
+
+		Ok(executor_params)
+	}
+
+	/// Gets the minimum backing votes threshold from the
+	/// cache or fetches it from the runtime if not present.
+	async fn minimum_backing_votes(
+		&mut self,
+		session_index: SessionIndex,
+		parent: Hash,
+		sender: &mut impl overseer::SubsystemSender<RuntimeApiMessage>,
+	) -> Result<u32, RuntimeApiError> {
+		// Try to get the value from the cache.
+		if let Some(minimum_backing_votes) = self.minimum_backing_votes_cache.get(&session_index) {
+			return Ok(*minimum_backing_votes);
+		}
+
+		// Fetch the value from the runtime since it was not in the cache.
+		let minimum_backing_votes = request_min_backing_votes(parent, session_index, sender)
+			.await
+			.map_err(|err| RuntimeApiError::Execution {
+				runtime_api_name: "MinimumBackingVotes",
+				source: Arc::new(err),
+			})?;
+
+		// Cache the fetched value for future use.
+		self.minimum_backing_votes_cache.insert(session_index, minimum_backing_votes);
+
+		Ok(minimum_backing_votes)
+	}
+
+	/// Gets or computes the validator-to-group mapping for a session.
+	fn validator_to_group(
+		&mut self,
+		session_index: SessionIndex,
+		validators: &[ValidatorId],
+		validator_groups: &[Vec<ValidatorIndex>],
+	) -> Arc<IndexedVec<ValidatorIndex, Option<GroupIndex>>> {
+		let validator_to_group = self
+			.validator_to_group_cache
+			.get_or_insert(session_index, || {
+				let mut vector = vec![None; validators.len()];
+
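+				// Invert the group assignments: record, for every validator index,
+				// the index of the group it belongs to.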
+				for (group_idx, validator_group) in validator_groups.iter().enumerate() {
+					for validator in validator_group {
+						vector[validator.0 as usize] = Some(GroupIndex(group_idx as u32));
+					}
+				}
+
+				Arc::new(IndexedVec::<_, _>::from(vector))
+			})
+			.expect("Just inserted");
+
+		Arc::clone(validator_to_group)
+	}
+}
+
 /// The state of the subsystem.
 struct State {
 	/// The utility for managing the implicit and explicit views in a consistent way.
@@ -322,9 +497,9 @@ struct State {
 	/// This is guaranteed to have an entry for each candidate with a relay parent in the implicit
 	/// or explicit view for which a `Seconded` statement has been successfully imported.
 	per_candidate: HashMap<CandidateHash, PerCandidateState>,
-	/// Cache the per-session Validator->Group mapping.
-	validator_to_group_cache:
-		LruMap<SessionIndex, Arc<IndexedVec<ValidatorIndex, Option<GroupIndex>>>>,
+	/// A local cache for storing per-session data. This cache helps to
+	/// reduce repeated calls to the runtime and avoid redundant computations.
+	per_session_cache: PerSessionCache,
 	/// A clonable sender which is dispatched to background candidate validation tasks to inform
 	/// the main task of the result.
 	background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
@@ -342,7 +517,7 @@ impl State {
 			per_leaf: HashMap::default(),
 			per_relay_parent: HashMap::default(),
 			per_candidate: HashMap::new(),
-			validator_to_group_cache: LruMap::new(ByLength::new(2)),
+			per_session_cache: PerSessionCache::default(),
 			background_validation_tx,
 			keystore,
 		}
@@ -670,7 +845,8 @@ struct BackgroundValidationParams<S: overseer::CandidateBackingSenderTrait, F> {
 	tx_command: mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
 	candidate: CandidateReceipt,
 	relay_parent: Hash,
-	session_index: SessionIndex,
+	node_features: NodeFeatures,
+	executor_params: Arc<ExecutorParams>,
 	persisted_validation_data: PersistedValidationData,
 	pov: PoVData,
 	n_validators: usize,
@@ -689,7 +865,8 @@ async fn validate_and_make_available(
 		mut tx_command,
 		candidate,
 		relay_parent,
-		session_index,
+		node_features,
+		executor_params,
 		persisted_validation_data,
 		pov,
 		n_validators,
@@ -714,15 +891,6 @@ async fn validate_and_make_available(
 		}
 	};
 
-	let executor_params = match executor_params_at_relay_parent(relay_parent, &mut sender).await {
-		Ok(ep) => ep,
-		Err(e) => return Err(Error::UtilError(e)),
-	};
-
-	let node_features = request_node_features(relay_parent, session_index, &mut sender)
-		.await?
-		.unwrap_or(NodeFeatures::EMPTY);
-
 	let pov = match pov {
 		PoVData::Ready(pov) => pov,
 		PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } =>
@@ -758,7 +926,7 @@ async fn validate_and_make_available(
 			validation_code,
 			candidate.clone(),
 			pov.clone(),
-			executor_params,
+			executor_params.as_ref().clone(),
 		)
 		.await?
 	};
@@ -985,7 +1153,7 @@ async fn handle_active_leaves_update<Context>(
 			ctx,
 			maybe_new,
 			&state.keystore,
-			&mut state.validator_to_group_cache,
+			&mut state.per_session_cache,
 			mode,
 		)
 		.await?;
@@ -1085,17 +1253,13 @@ async fn construct_per_relay_parent_state<Context>(
 	ctx: &mut Context,
 	relay_parent: Hash,
 	keystore: &KeystorePtr,
-	validator_to_group_cache: &mut LruMap<
-		SessionIndex,
-		Arc<IndexedVec<ValidatorIndex, Option<GroupIndex>>>,
-	>,
+	per_session_cache: &mut PerSessionCache,
 	mode: ProspectiveParachainsMode,
 ) -> Result<Option<PerRelayParentState>, Error> {
 	let parent = relay_parent;
 
-	let (session_index, validators, groups, cores) = futures::try_join!(
+	let (session_index, groups, cores) = futures::try_join!(
 		request_session_index_for_child(parent, ctx.sender()).await,
-		request_validators(parent, ctx.sender()).await,
 		request_validator_groups(parent, ctx.sender()).await,
 		request_from_runtime(parent, ctx.sender(), |tx| {
 			RuntimeApiRequest::AvailabilityCores(tx)
@@ -1106,20 +1270,32 @@ async fn construct_per_relay_parent_state<Context>(
 
 	let session_index = try_runtime_api!(session_index);
 
-	let inject_core_index = request_node_features(parent, session_index, ctx.sender())
+	let validators = per_session_cache.validators(session_index, parent, ctx.sender()).await;
+	let validators = try_runtime_api!(validators);
+
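+	// `None` means the runtime does not yet support the node features API;
+	// fall back to no features enabled.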
+	let node_features = per_session_cache
+		.node_features(session_index, parent, ctx.sender())
 		.await?
-		.unwrap_or(NodeFeatures::EMPTY)
+		.unwrap_or(NodeFeatures::EMPTY);
+
+	let inject_core_index = node_features
 		.get(FeatureIndex::ElasticScalingMVP as usize)
 		.map(|b| *b)
 		.unwrap_or(false);
 
+	let executor_params =
+		per_session_cache.executor_params(session_index, parent, ctx.sender()).await;
+	let executor_params = try_runtime_api!(executor_params);
+
 	gum::debug!(target: LOG_TARGET, inject_core_index, ?parent, "New state");
 
-	let validators: Vec<_> = try_runtime_api!(validators);
 	let (validator_groups, group_rotation_info) = try_runtime_api!(groups);
 	let cores = try_runtime_api!(cores);
-	let minimum_backing_votes =
-		try_runtime_api!(request_min_backing_votes(parent, session_index, ctx.sender()).await);
+
+	let minimum_backing_votes = per_session_cache
+		.minimum_backing_votes(session_index, parent, ctx.sender())
+		.await;
+	let minimum_backing_votes = try_runtime_api!(minimum_backing_votes);
 
 	// TODO: https://github.com/paritytech/polkadot-sdk/issues/1940
 	// Once runtime ver `DISABLED_VALIDATORS_RUNTIME_REQUIREMENT` is released remove this call to
@@ -1192,21 +1368,11 @@ async fn construct_per_relay_parent_state<Context>(
 	}
 	gum::debug!(target: LOG_TARGET, ?groups, "TableContext");
 
-	let validator_to_group = validator_to_group_cache
-		.get_or_insert(session_index, || {
-			let mut vector = vec![None; validators.len()];
-
-			for (group_idx, validator_group) in validator_groups.iter().enumerate() {
-				for validator in validator_group {
-					vector[validator.0 as usize] = Some(GroupIndex(group_idx as u32));
-				}
-			}
-
-			Arc::new(IndexedVec::<_, _>::from(vector))
-		})
-		.expect("Just inserted");
+	let validator_to_group =
+		per_session_cache.validator_to_group(session_index, &validators, &validator_groups);
 
-	let table_context = TableContext { validator, groups, validators, disabled_validators };
+	let table_context =
+		TableContext { validator, groups, validators: validators.to_vec(), disabled_validators };
 	let table_config = TableConfig {
 		allow_multiple_seconded: match mode {
 			ProspectiveParachainsMode::Enabled { .. } => true,
@@ -1217,7 +1383,8 @@ async fn construct_per_relay_parent_state<Context>(
 	Ok(Some(PerRelayParentState {
 		prospective_parachains_mode: mode,
 		parent,
-		session_index,
+		node_features,
+		executor_params,
 		assigned_core,
 		backed: HashSet::new(),
 		table: Table::new(table_config),
@@ -1229,7 +1396,7 @@ async fn construct_per_relay_parent_state<Context>(
 		inject_core_index,
 		n_cores: cores.len() as u32,
 		claim_queue: ClaimQueueSnapshot::from(claim_queue),
-		validator_to_group: validator_to_group.clone(),
+		validator_to_group,
 		group_rotation_info,
 	}))
 }
@@ -1895,7 +2062,8 @@ async fn kick_off_validation_work<Context>(
 			tx_command: background_validation_tx.clone(),
 			candidate: attesting.candidate,
 			relay_parent: rp_state.parent,
-			session_index: rp_state.session_index,
+			node_features: rp_state.node_features.clone(),
+			executor_params: Arc::clone(&rp_state.executor_params),
 			persisted_validation_data,
 			pov,
 			n_validators: rp_state.table_context.validators.len(),
@@ -2049,7 +2217,8 @@ async fn validate_and_second<Context>(
 			tx_command: background_validation_tx.clone(),
 			candidate: candidate.clone(),
 			relay_parent: rp_state.parent,
-			session_index: rp_state.session_index,
+			node_features: rp_state.node_features.clone(),
+			executor_params: Arc::clone(&rp_state.executor_params),
 			persisted_validation_data,
 			pov: PoVData::Ready(pov),
 			n_validators: rp_state.table_context.validators.len(),
diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs
index 97e25c04282c4dad91415db3f30e598fdaf9fb97..5e3d503738709a31e44afb65ddb8c875864dd090 100644
--- a/polkadot/node/core/backing/src/tests/mod.rs
+++ b/polkadot/node/core/backing/src/tests/mod.rs
@@ -69,6 +69,14 @@ fn dummy_pvd() -> PersistedValidationData {
 	}
 }
 
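+/// Tracks which per-session data the subsystem has already requested and
+/// cached, so the test harness expects each of these runtime API requests
+/// only once per session.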
+#[derive(Default)]
+struct PerSessionCacheState {
+	has_cached_validators: bool,
+	has_cached_node_features: bool,
+	has_cached_executor_params: bool,
+	has_cached_minimum_backing_votes: bool,
+}
+
 pub(crate) struct TestState {
 	chain_ids: Vec<ParaId>,
 	keystore: KeystorePtr,
@@ -85,6 +93,7 @@ pub(crate) struct TestState {
 	minimum_backing_votes: u32,
 	disabled_validators: Vec<ValidatorIndex>,
 	node_features: NodeFeatures,
+	per_session_cache_state: PerSessionCacheState,
 }
 
 impl TestState {
@@ -157,6 +166,7 @@ impl Default for TestState {
 			chain_ids,
 			keystore,
 			validators,
+			per_session_cache_state: PerSessionCacheState::default(),
 			validator_public,
 			validator_groups: (validator_groups, group_rotation_info),
 			validator_to_group,
@@ -251,7 +261,7 @@ impl TestCandidateBuilder {
 }
 
 // Tests that the subsystem performs actions that are required on startup.
-async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestState) {
+async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &mut TestState) {
 	// Start work on some new parent.
 	virtual_overseer
 		.send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(
@@ -278,16 +288,6 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS
 		}
 	);
 
-	// Check that subsystem job issues a request for a validator set.
-	assert_matches!(
-		virtual_overseer.recv().await,
-		AllMessages::RuntimeApi(
-			RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx))
-		) if parent == test_state.relay_parent => {
-			tx.send(Ok(test_state.validator_public.clone())).unwrap();
-		}
-	);
-
 	// Check that subsystem job issues a request for the validator groups.
 	assert_matches!(
 		virtual_overseer.recv().await,
@@ -308,26 +308,58 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS
 		}
 	);
 
-	// Node features request from runtime: all features are disabled.
-	assert_matches!(
-		virtual_overseer.recv().await,
-		AllMessages::RuntimeApi(
-			RuntimeApiMessage::Request(_parent, RuntimeApiRequest::NodeFeatures(_session_index, tx))
-		) => {
-			tx.send(Ok(test_state.node_features.clone())).unwrap();
-		}
-	);
+	if !test_state.per_session_cache_state.has_cached_validators {
+		// Check that subsystem job issues a request for a validator set.
+		assert_matches!(
+			virtual_overseer.recv().await,
+			AllMessages::RuntimeApi(
+				RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx))
+			) if parent == test_state.relay_parent => {
+				tx.send(Ok(test_state.validator_public.clone())).unwrap();
+			}
+		);
+		test_state.per_session_cache_state.has_cached_validators = true;
+	}
 
-	// Check if subsystem job issues a request for the minimum backing votes.
-	assert_matches!(
-		virtual_overseer.recv().await,
-		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-			parent,
-			RuntimeApiRequest::MinimumBackingVotes(session_index, tx),
-		)) if parent == test_state.relay_parent && session_index == test_state.signing_context.session_index => {
-			tx.send(Ok(test_state.minimum_backing_votes)).unwrap();
-		}
-	);
+	if !test_state.per_session_cache_state.has_cached_node_features {
+		// Node features request from runtime: all features are disabled.
+		assert_matches!(
+			virtual_overseer.recv().await,
+			AllMessages::RuntimeApi(
+				RuntimeApiMessage::Request(_parent, RuntimeApiRequest::NodeFeatures(_session_index, tx))
+			) => {
+				tx.send(Ok(test_state.node_features.clone())).unwrap();
+			}
+		);
+		test_state.per_session_cache_state.has_cached_node_features = true;
+	}
+
+	if !test_state.per_session_cache_state.has_cached_executor_params {
+		// Check if subsystem job issues a request for the executor parameters.
+		assert_matches!(
+			virtual_overseer.recv().await,
+			AllMessages::RuntimeApi(
+				RuntimeApiMessage::Request(_parent, RuntimeApiRequest::SessionExecutorParams(_session_index, tx))
+			) => {
+				tx.send(Ok(Some(ExecutorParams::default()))).unwrap();
+			}
+		);
+		test_state.per_session_cache_state.has_cached_executor_params = true;
+	}
+
+	if !test_state.per_session_cache_state.has_cached_minimum_backing_votes {
+		// Check if subsystem job issues a request for the minimum backing votes.
+		assert_matches!(
+			virtual_overseer.recv().await,
+			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				parent,
+				RuntimeApiRequest::MinimumBackingVotes(session_index, tx),
+			)) if parent == test_state.relay_parent && session_index == test_state.signing_context.session_index => {
+				tx.send(Ok(test_state.minimum_backing_votes)).unwrap();
+			}
+		);
+		test_state.per_session_cache_state.has_cached_minimum_backing_votes = true;
+	}
 
 	// Check that subsystem job issues a request for the runtime version.
 	assert_matches!(
@@ -382,33 +414,6 @@ async fn assert_validation_requests(
 			tx.send(Ok(Some(validation_code))).unwrap();
 		}
 	);
-
-	assert_matches!(
-		virtual_overseer.recv().await,
-		AllMessages::RuntimeApi(
-			RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx))
-		) => {
-			tx.send(Ok(1u32.into())).unwrap();
-		}
-	);
-
-	assert_matches!(
-		virtual_overseer.recv().await,
-		AllMessages::RuntimeApi(
-			RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionExecutorParams(sess_idx, tx))
-		) if sess_idx == 1 => {
-			tx.send(Ok(Some(ExecutorParams::default()))).unwrap();
-		}
-	);
-
-	assert_matches!(
-		virtual_overseer.recv().await,
-		AllMessages::RuntimeApi(
-			RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(sess_idx, tx))
-		) if sess_idx == 1 => {
-			tx.send(Ok(NodeFeatures::EMPTY)).unwrap();
-		}
-	);
 }
 
 async fn assert_validate_from_exhaustive(
@@ -458,9 +463,9 @@ async fn assert_validate_from_exhaustive(
 // and in case validation is successful issues a `StatementDistributionMessage`.
 #[test]
 fn backing_second_works() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
-		test_startup(&mut virtual_overseer, &test_state).await;
+		test_startup(&mut virtual_overseer, &mut test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
@@ -554,7 +559,7 @@ fn backing_works(#[case] elastic_scaling_mvp: bool) {
 	}
 
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
-		test_startup(&mut virtual_overseer, &test_state).await;
+		test_startup(&mut virtual_overseer, &mut test_state).await;
 
 		let pov_ab = PoV { block_data: BlockData(vec![1, 2, 3]) };
 		let pvd_ab = dummy_pvd();
@@ -772,7 +777,7 @@ fn get_backed_candidate_preserves_order() {
 		.insert(CoreIndex(2), [test_state.chain_ids[1]].into_iter().collect());
 
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
-		test_startup(&mut virtual_overseer, &test_state).await;
+		test_startup(&mut virtual_overseer, &mut test_state).await;
 
 		let pov_a = PoV { block_data: BlockData(vec![1, 2, 3]) };
 		let pov_b = PoV { block_data: BlockData(vec![3, 4, 5]) };
@@ -1171,9 +1176,9 @@ fn extract_core_index_from_statement_works() {
 
 #[test]
 fn backing_works_while_validation_ongoing() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
-		test_startup(&mut virtual_overseer, &test_state).await;
+		test_startup(&mut virtual_overseer, &mut test_state).await;
 
 		let pov_abc = PoV { block_data: BlockData(vec![1, 2, 3]) };
 		let pvd_abc = dummy_pvd();
@@ -1366,9 +1371,9 @@ fn backing_works_while_validation_ongoing() {
 // be a misbehavior.
 #[test]
 fn backing_misbehavior_works() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
-		test_startup(&mut virtual_overseer, &test_state).await;
+		test_startup(&mut virtual_overseer, &mut test_state).await;
 
 		let pov_a = PoV { block_data: BlockData(vec![1, 2, 3]) };
 
@@ -1552,9 +1557,9 @@ fn backing_misbehavior_works() {
 // can still second a valid one afterwards.
 #[test]
 fn backing_dont_second_invalid() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
-		test_startup(&mut virtual_overseer, &test_state).await;
+		test_startup(&mut virtual_overseer, &mut test_state).await;
 
 		let pov_block_a = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd_a = dummy_pvd();
@@ -1712,9 +1717,9 @@ fn backing_dont_second_invalid() {
 // candidate we will not be issuing a `Seconded` statement on it.
 #[test]
 fn backing_second_after_first_fails_works() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
-		test_startup(&mut virtual_overseer, &test_state).await;
+		test_startup(&mut virtual_overseer, &mut test_state).await;
 
 		let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd_a = dummy_pvd();
@@ -1858,9 +1863,9 @@ fn backing_second_after_first_fails_works() {
 // the work of this subsystem and so it is not fatal to the node.
 #[test]
 fn backing_works_after_failed_validation() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
-		test_startup(&mut virtual_overseer, &test_state).await;
+		test_startup(&mut virtual_overseer, &mut test_state).await;
 
 		let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd_a = dummy_pvd();
@@ -2037,9 +2042,9 @@ fn candidate_backing_reorders_votes() {
 #[test]
 fn retry_works() {
 	// sp_tracing::try_init_simple();
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
-		test_startup(&mut virtual_overseer, &test_state).await;
+		test_startup(&mut virtual_overseer, &mut test_state).await;
 
 		let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd_a = dummy_pvd();
@@ -2134,7 +2139,7 @@ fn retry_works() {
 		virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await;
 
 		// Not deterministic which message comes first:
-		for _ in 0u32..6 {
+		for _ in 0u32..3 {
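+			// Executor params and node features are no longer requested during
+			// validation; they now come from the cached per-relay-parent state.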
 			match virtual_overseer.recv().await {
 				AllMessages::Provisioner(ProvisionerMessage::ProvisionableData(
 					_,
@@ -2153,24 +2158,6 @@ fn retry_works() {
 				)) if hash == validation_code_a.hash() => {
 					tx.send(Ok(Some(validation_code_a.clone()))).unwrap();
 				},
-				AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-					_,
-					RuntimeApiRequest::SessionIndexForChild(tx),
-				)) => {
-					tx.send(Ok(1u32.into())).unwrap();
-				},
-				AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-					_,
-					RuntimeApiRequest::SessionExecutorParams(1, tx),
-				)) => {
-					tx.send(Ok(Some(ExecutorParams::default()))).unwrap();
-				},
-				AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-					_,
-					RuntimeApiRequest::NodeFeatures(1, tx),
-				)) => {
-					tx.send(Ok(NodeFeatures::EMPTY)).unwrap();
-				},
 				msg => {
 					assert!(false, "Unexpected message: {:?}", msg);
 				},
@@ -2221,10 +2208,10 @@ fn retry_works() {
 
 #[test]
 fn observes_backing_even_if_not_validator() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
 	let empty_keystore = Arc::new(sc_keystore::LocalKeystore::in_memory());
 	test_harness(empty_keystore, |mut virtual_overseer| async move {
-		test_startup(&mut virtual_overseer, &test_state).await;
+		test_startup(&mut virtual_overseer, &mut test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![1, 2, 3]) };
 		let pvd = dummy_pvd();
@@ -2340,9 +2327,9 @@ fn observes_backing_even_if_not_validator() {
 // without prospective parachains.
 #[test]
 fn cannot_second_multiple_candidates_per_parent() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
-		test_startup(&mut virtual_overseer, &test_state).await;
+		test_startup(&mut virtual_overseer, &mut test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
@@ -2478,13 +2465,13 @@ fn new_leaf_view_doesnt_clobber_old() {
 	let relay_parent_2 = Hash::repeat_byte(1);
 	assert_ne!(test_state.relay_parent, relay_parent_2);
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
-		test_startup(&mut virtual_overseer, &test_state).await;
+		test_startup(&mut virtual_overseer, &mut test_state).await;
 
 		// New leaf that doesn't clobber old.
 		{
 			let old_relay_parent = test_state.relay_parent;
 			test_state.relay_parent = relay_parent_2;
-			test_startup(&mut virtual_overseer, &test_state).await;
+			test_startup(&mut virtual_overseer, &mut test_state).await;
 			test_state.relay_parent = old_relay_parent;
 		}
 
@@ -2537,7 +2524,7 @@ fn disabled_validator_doesnt_distribute_statement_on_receiving_second() {
 	test_state.disabled_validators.push(ValidatorIndex(0));
 
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
-		test_startup(&mut virtual_overseer, &test_state).await;
+		test_startup(&mut virtual_overseer, &mut test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
@@ -2585,7 +2572,7 @@ fn disabled_validator_doesnt_distribute_statement_on_receiving_statement() {
 	test_state.disabled_validators.push(ValidatorIndex(0));
 
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
-		test_startup(&mut virtual_overseer, &test_state).await;
+		test_startup(&mut virtual_overseer, &mut test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
@@ -2647,7 +2634,7 @@ fn validator_ignores_statements_from_disabled_validators() {
 	test_state.disabled_validators.push(ValidatorIndex(2));
 
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
-		test_startup(&mut virtual_overseer, &test_state).await;
+		test_startup(&mut virtual_overseer, &mut test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs
index db5409ee4bd50206283ccbd82f9b94682142b1e1..a05408eff85ddd5b3a05c68c85dc7813898dc9f5 100644
--- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs
+++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs
@@ -39,7 +39,7 @@ fn get_parent_hash(hash: Hash) -> Hash {
 async fn activate_leaf(
 	virtual_overseer: &mut VirtualOverseer,
 	leaf: TestLeaf,
-	test_state: &TestState,
+	test_state: &mut TestState,
 ) {
 	let TestLeaf { activated, min_relay_parents } = leaf;
 	let leaf_hash = activated.hash;
@@ -140,16 +140,6 @@ async fn activate_leaf(
 			}
 		);
 
-		// Check that subsystem job issues a request for a validator set.
-		assert_matches!(
-			virtual_overseer.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx))
-			) if parent == hash => {
-				tx.send(Ok(test_state.validator_public.clone())).unwrap();
-			}
-		);
-
 		// Check that subsystem job issues a request for the validator groups.
 		assert_matches!(
 			virtual_overseer.recv().await,
@@ -172,26 +162,58 @@ async fn activate_leaf(
 			}
 		);
 
-		// Node features request from runtime: all features are disabled.
-		assert_matches!(
-			virtual_overseer.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(parent, RuntimeApiRequest::NodeFeatures(_session_index, tx))
-			) if parent == hash => {
-				tx.send(Ok(Default::default())).unwrap();
-			}
-		);
+		if !test_state.per_session_cache_state.has_cached_validators {
+			// Check that subsystem job issues a request for a validator set.
+			assert_matches!(
+				virtual_overseer.recv().await,
+				AllMessages::RuntimeApi(
+					RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx))
+				) if parent == hash => {
+					tx.send(Ok(test_state.validator_public.clone())).unwrap();
+				}
+			);
+			test_state.per_session_cache_state.has_cached_validators = true;
+		}
 
-		// Check if subsystem job issues a request for the minimum backing votes.
-		assert_matches!(
-			virtual_overseer.recv().await,
-			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-				parent,
-				RuntimeApiRequest::MinimumBackingVotes(session_index, tx),
-			)) if parent == hash && session_index == test_state.signing_context.session_index => {
-				tx.send(Ok(test_state.minimum_backing_votes)).unwrap();
-			}
-		);
+		if !test_state.per_session_cache_state.has_cached_node_features {
+			// Node features request from runtime: all features are disabled.
+			assert_matches!(
+				virtual_overseer.recv().await,
+				AllMessages::RuntimeApi(
+					RuntimeApiMessage::Request(parent, RuntimeApiRequest::NodeFeatures(_session_index, tx))
+				) if parent == hash => {
+					tx.send(Ok(Default::default())).unwrap();
+				}
+			);
+			test_state.per_session_cache_state.has_cached_node_features = true;
+		}
+
+		if !test_state.per_session_cache_state.has_cached_executor_params {
+			// Check if subsystem job issues a request for the executor parameters.
+			assert_matches!(
+				virtual_overseer.recv().await,
+				AllMessages::RuntimeApi(
+					RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionExecutorParams(_session_index, tx))
+				) if parent == hash => {
+					tx.send(Ok(Some(ExecutorParams::default()))).unwrap();
+				}
+			);
+			test_state.per_session_cache_state.has_cached_executor_params = true;
+		}
+
+		if !test_state.per_session_cache_state.has_cached_minimum_backing_votes {
+			// Check if subsystem job issues a request for the minimum backing votes.
+			assert_matches!(
+				virtual_overseer.recv().await,
+				AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+					parent,
+					RuntimeApiRequest::MinimumBackingVotes(session_index, tx),
+				)) if parent == hash && session_index == test_state.signing_context.session_index => {
+					tx.send(Ok(test_state.minimum_backing_votes)).unwrap();
+				}
+			);
+			test_state.per_session_cache_state.has_cached_minimum_backing_votes = true;
+		}
 
 		// Check that subsystem job issues a request for the runtime version.
 		assert_matches!(
@@ -348,7 +370,7 @@ fn make_hypothetical_membership_response(
 // for all leaves.
 #[test]
 fn seconding_sanity_check_allowed_on_all() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
 		// Candidate is seconded in a parent of the activated `leaf_a`.
 		const LEAF_A_BLOCK_NUMBER: BlockNumber = 100;
@@ -370,8 +392,8 @@ fn seconding_sanity_check_allowed_on_all() {
 		let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)];
 		let test_leaf_b = TestLeaf { activated, min_relay_parents };
 
-		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await;
-		activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_b, &mut test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
@@ -480,7 +502,7 @@ fn seconding_sanity_check_allowed_on_all() {
 // for all leaves.
 #[test]
 fn seconding_sanity_check_disallowed() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
 		// Candidate is seconded in a parent of the activated `leaf_a`.
 		const LEAF_A_BLOCK_NUMBER: BlockNumber = 100;
@@ -502,7 +524,7 @@ fn seconding_sanity_check_disallowed() {
 		let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)];
 		let test_leaf_b = TestLeaf { activated, min_relay_parents };
 
-		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
@@ -594,8 +616,9 @@ fn seconding_sanity_check_disallowed() {
 			}
 		);
 
-		activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_b, &mut test_state).await;
 		let leaf_a_grandparent = get_parent_hash(leaf_a_parent);
+		let expected_head_data = test_state.head_data.get(&para_id).unwrap();
 		let candidate = TestCandidateBuilder {
 			para_id,
 			relay_parent: leaf_a_grandparent,
@@ -667,7 +690,7 @@ fn seconding_sanity_check_disallowed() {
 // leaf.
 #[test]
 fn seconding_sanity_check_allowed_on_at_least_one_leaf() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
 		// Candidate is seconded in a parent of the activated `leaf_a`.
 		const LEAF_A_BLOCK_NUMBER: BlockNumber = 100;
@@ -689,8 +712,8 @@ fn seconding_sanity_check_allowed_on_at_least_one_leaf() {
 		let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)];
 		let test_leaf_b = TestLeaf { activated, min_relay_parents };
 
-		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await;
-		activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_b, &mut test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
@@ -798,7 +821,7 @@ fn seconding_sanity_check_allowed_on_at_least_one_leaf() {
 // subsystem doesn't change the view.
 #[test]
 fn prospective_parachains_reject_candidate() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
 		// Candidate is seconded in a parent of the activated `leaf_a`.
 		const LEAF_A_BLOCK_NUMBER: BlockNumber = 100;
@@ -811,7 +834,7 @@ fn prospective_parachains_reject_candidate() {
 		let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)];
 		let test_leaf_a = TestLeaf { activated, min_relay_parents };
 
-		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
@@ -961,7 +984,7 @@ fn prospective_parachains_reject_candidate() {
 // Test that a validator can second multiple candidates per single relay parent.
 #[test]
 fn second_multiple_candidates_per_relay_parent() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
 		// Candidate `a` is seconded in a parent of the activated `leaf`.
 		const LEAF_BLOCK_NUMBER: BlockNumber = 100;
@@ -975,7 +998,7 @@ fn second_multiple_candidates_per_relay_parent() {
 		let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)];
 		let test_leaf_a = TestLeaf { activated, min_relay_parents };
 
-		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
@@ -1083,7 +1106,7 @@ fn second_multiple_candidates_per_relay_parent() {
 // Test that the candidate reaches quorum successfully.
 #[test]
 fn backing_works() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
 		// Candidate `a` is seconded in a parent of the activated `leaf`.
 		const LEAF_BLOCK_NUMBER: BlockNumber = 100;
@@ -1096,7 +1119,7 @@ fn backing_works() {
 		let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)];
 		let test_leaf_a = TestLeaf { activated, min_relay_parents };
 
-		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
@@ -1225,7 +1248,7 @@ fn backing_works() {
 // Tests that validators start work on consecutive prospective parachain blocks.
 #[test]
 fn concurrent_dependent_candidates() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
 		// Candidate `a` is seconded in a grandparent of the activated `leaf`,
 		// candidate `b` -- in parent.
@@ -1240,7 +1263,7 @@ fn concurrent_dependent_candidates() {
 		let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)];
 		let test_leaf_a = TestLeaf { activated, min_relay_parents };
 
-		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await;
 
 		let head_data = &[
 			HeadData(vec![10, 20, 30]), // Before `a`.
@@ -1436,32 +1459,12 @@ fn concurrent_dependent_candidates() {
 						break
 					}
 				},
-				AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-					_,
-					RuntimeApiRequest::SessionIndexForChild(tx),
-				)) => {
-					tx.send(Ok(1u32.into())).unwrap();
-				},
-				AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-					_,
-					RuntimeApiRequest::SessionExecutorParams(sess_idx, tx),
-				)) => {
-					assert_eq!(sess_idx, 1);
-					tx.send(Ok(Some(ExecutorParams::default()))).unwrap();
-				},
 				AllMessages::RuntimeApi(RuntimeApiMessage::Request(
 					_parent,
 					RuntimeApiRequest::ValidatorGroups(tx),
 				)) => {
 					tx.send(Ok(test_state.validator_groups.clone())).unwrap();
 				},
-				AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-					_,
-					RuntimeApiRequest::NodeFeatures(sess_idx, tx),
-				)) => {
-					assert_eq!(sess_idx, 1);
-					tx.send(Ok(NodeFeatures::EMPTY)).unwrap();
-				},
 				AllMessages::RuntimeApi(RuntimeApiMessage::Request(
 					_parent,
 					RuntimeApiRequest::AvailabilityCores(tx),
@@ -1485,7 +1488,7 @@ fn concurrent_dependent_candidates() {
 // in a given relay parent.
 #[test]
 fn seconding_sanity_check_occupy_same_depth() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
 	test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move {
 		// Candidate `a` is seconded in a parent of the activated `leaf`.
 		const LEAF_BLOCK_NUMBER: BlockNumber = 100;
@@ -1502,7 +1505,7 @@ fn seconding_sanity_check_occupy_same_depth() {
 		let min_relay_parents = vec![(para_id_a, min_block_number), (para_id_b, min_block_number)];
 		let test_leaf_a = TestLeaf { activated, min_relay_parents };
 
-		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
@@ -1647,7 +1650,7 @@ fn occupied_core_assignment() {
 		let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)];
 		let test_leaf_a = TestLeaf { activated, min_relay_parents };
 
-		activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await;
+		activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await;
 
 		let pov = PoV { block_data: BlockData(vec![42, 43, 44]) };
 		let pvd = dummy_pvd();
diff --git a/prdoc/pr_6284.prdoc b/prdoc/pr_6284.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..e2d9ebb526d2924710b91721fed838c674a3e949
--- /dev/null
+++ b/prdoc/pr_6284.prdoc
@@ -0,0 +1,22 @@
+title: "backing: improve session buffering for runtime information"
+
+doc:
+  - audience: Node Dev
+    description: |
+      This PR implements caching within the backing module for session-stable information,
+      reducing redundant runtime API calls.
+
+      Specifically, it introduces a local cache for the following:
+        - validators list;
+        - node features;
+        - executor parameters;
+        - minimum backing votes threshold;
+        - validator-to-group mapping.
+
+      Previously, this data was fetched or computed repeatedly each time `PerRelayParentState`
+      was built. With this update, the cached information is fetched once and reused throughout
+      the session.
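+
+      The cache retains data for at most two sessions, matching the capacity
+      previously used for the validator-to-group cache.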
+
+crates:
+  - name: polkadot-node-core-backing
+    bump: patch