diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml
index d17380839942a1243dbc9b8d90768b07b05c5415..60870caf26c82133e5a155748245cd6457bc12db 100644
--- a/.gitlab/pipeline/zombienet/polkadot.yml
+++ b/.gitlab/pipeline/zombienet/polkadot.yml
@@ -233,6 +233,25 @@ zombienet-polkadot-functional-0016-approval-voting-parallel:
       --local-dir="${LOCAL_DIR}/functional"
       --test="0016-approval-voting-parallel.zndsl"
 
+zombienet-polkadot-functional-0017-sync-backing:
+  extends:
+    - .zombienet-polkadot-common
+  script:
+    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
+      --local-dir="${LOCAL_DIR}/functional"
+      --test="0017-sync-backing.zndsl"
+
+zombienet-polkadot-functional-0018-shared-core-idle-parachain:
+  extends:
+    - .zombienet-polkadot-common
+  before_script:
+    - !reference [ .zombienet-polkadot-common, before_script ]
+    - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/functional
+  script:
+    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
+      --local-dir="${LOCAL_DIR}/functional"
+      --test="0018-shared-core-idle-parachain.zndsl"
+
 zombienet-polkadot-smoke-0001-parachains-smoke-test:
   extends:
     - .zombienet-polkadot-common
diff --git a/polkadot/primitives/src/v8/mod.rs b/polkadot/primitives/src/v8/mod.rs
index a51ee0bd99bfe9d2c8db17b1db5cdfadce489277..cca327df42c9d0d37a8698b91931e3003ee5535a 100644
--- a/polkadot/primitives/src/v8/mod.rs
+++ b/polkadot/primitives/src/v8/mod.rs
@@ -2093,7 +2093,9 @@ pub struct SchedulerParams<BlockNumber> {
 	pub lookahead: u32,
 	/// How many cores are managed by the coretime chain.
 	pub num_cores: u32,
-	/// The max number of times a claim can time out in availability.
+	/// Deprecated and no longer used by the runtime.
+	/// Removal is tracked by <https://github.com/paritytech/polkadot-sdk/issues/6067>.
+	#[deprecated]
 	pub max_availability_timeouts: u32,
 	/// The maximum queue size of the pay as you go module.
 	pub on_demand_queue_max_size: u32,
@@ -2104,13 +2106,14 @@ pub struct SchedulerParams<BlockNumber> {
 	pub on_demand_fee_variability: Perbill,
 	/// The minimum amount needed to claim a slot in the spot pricing queue.
 	pub on_demand_base_fee: Balance,
-	/// The number of blocks a claim stays in the scheduler's claim queue before getting cleared.
-	/// This number should go reasonably higher than the number of blocks in the async backing
-	/// lookahead.
+	/// Deprecated and no longer used by the runtime.
+	/// Removal is tracked by <https://github.com/paritytech/polkadot-sdk/issues/6067>.
+	#[deprecated]
 	pub ttl: BlockNumber,
 }
 
 impl<BlockNumber: Default + From<u32>> Default for SchedulerParams<BlockNumber> {
+	#[allow(deprecated)]
 	fn default() -> Self {
 		Self {
 			group_rotation_frequency: 1u32.into(),
diff --git a/polkadot/runtime/parachains/src/assigner_coretime/mod.rs b/polkadot/runtime/parachains/src/assigner_coretime/mod.rs
index 7ee76600b42c07bdbb011ddad1d238ec95b89bcf..33a36a1bb2ea6044a721b0ced6de57c12abc7260 100644
--- a/polkadot/runtime/parachains/src/assigner_coretime/mod.rs
+++ b/polkadot/runtime/parachains/src/assigner_coretime/mod.rs
@@ -318,9 +318,12 @@ impl<T: Config> AssignmentProvider<BlockNumberFor<T>> for Pallet<T> {
 		Assignment::Bulk(para_id)
 	}
 
-	fn session_core_count() -> u32 {
-		let config = configuration::ActiveConfig::<T>::get();
-		config.scheduler_params.num_cores
+	fn assignment_duplicated(assignment: &Assignment) {
+		match assignment {
+			Assignment::Pool { para_id, core_index } =>
+				on_demand::Pallet::<T>::assignment_duplicated(*para_id, *core_index),
+			Assignment::Bulk(_) => {},
+		}
 	}
 }
 
diff --git a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs
index e7994b8ef820773b4f310b6285303263170a2638..25007f0eed6aac023836456b95fef41f6d205044 100644
--- a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs
+++ b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs
@@ -26,7 +26,6 @@ use crate::{
 	paras::{ParaGenesisArgs, ParaKind},
 	scheduler::common::Assignment,
 };
-use alloc::collections::btree_map::BTreeMap;
 use frame_support::{assert_noop, assert_ok, pallet_prelude::*, traits::Currency};
 use pallet_broker::TaskId;
 use polkadot_primitives::{BlockNumber, Id as ParaId, SessionIndex, ValidationCode};
@@ -78,7 +77,7 @@ fn run_to_block(
 		OnDemand::on_initialize(b + 1);
 
 		// In the real runtime this is expected to be called by the `InclusionInherent` pallet.
-		Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1);
+		Scheduler::advance_claim_queue(&Default::default());
 	}
 }
 
diff --git a/polkadot/runtime/parachains/src/assigner_parachains.rs b/polkadot/runtime/parachains/src/assigner_parachains.rs
index 3c735b999cf2313b01150cb64832d0a394ce542e..53edae5c32fc9e8e00d50971f0916818849668dc 100644
--- a/polkadot/runtime/parachains/src/assigner_parachains.rs
+++ b/polkadot/runtime/parachains/src/assigner_parachains.rs
@@ -63,7 +63,5 @@ impl<T: Config> AssignmentProvider<BlockNumberFor<T>> for Pallet<T> {
 		Assignment::Bulk(para_id)
 	}
 
-	fn session_core_count() -> u32 {
-		paras::Parachains::<T>::decode_len().unwrap_or(0) as u32
-	}
+	fn assignment_duplicated(_: &Assignment) {}
 }
diff --git a/polkadot/runtime/parachains/src/assigner_parachains/tests.rs b/polkadot/runtime/parachains/src/assigner_parachains/tests.rs
index 817e43a7138ddbe121e84ea6c02497e07b323469..6e8e185bb48dfc0d70cef6b182b5224a1603d6b3 100644
--- a/polkadot/runtime/parachains/src/assigner_parachains/tests.rs
+++ b/polkadot/runtime/parachains/src/assigner_parachains/tests.rs
@@ -23,7 +23,6 @@ use crate::{
 	},
 	paras::{ParaGenesisArgs, ParaKind},
 };
-use alloc::collections::btree_map::BTreeMap;
 use frame_support::{assert_ok, pallet_prelude::*};
 use polkadot_primitives::{BlockNumber, Id as ParaId, SessionIndex, ValidationCode};
 
@@ -71,7 +70,7 @@ fn run_to_block(
 		Scheduler::initializer_initialize(b + 1);
 
 		// In the real runtime this is expected to be called by the `InclusionInherent` pallet.
-		Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1);
+		Scheduler::advance_claim_queue(&Default::default());
 	}
 }
 
diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs
index 1654590d109e97aecc3de062ebb57489274c5026..fa9497f8ccd5cca79781abd9137991866cbbdb39 100644
--- a/polkadot/runtime/parachains/src/builder.rs
+++ b/polkadot/runtime/parachains/src/builder.rs
@@ -18,7 +18,10 @@ use crate::{
 	configuration, inclusion, initializer, paras,
 	paras::ParaKind,
 	paras_inherent,
-	scheduler::{self, common::AssignmentProvider, CoreOccupied, ParasEntry},
+	scheduler::{
+		self,
+		common::{Assignment, AssignmentProvider},
+	},
 	session_info, shared,
 };
 use alloc::{
@@ -138,8 +141,6 @@ pub(crate) struct BenchBuilder<T: paras_inherent::Config> {
 	/// Make every candidate include a code upgrade by setting this to `Some` where the interior
 	/// value is the byte length of the new code.
 	code_upgrade: Option<u32>,
-	/// Specifies whether the claimqueue should be filled.
-	fill_claimqueue: bool,
 	/// Cores which should not be available when being populated with pending candidates.
 	unavailable_cores: Vec<u32>,
 	/// Use v2 candidate descriptor.
@@ -178,7 +179,6 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
 			backed_in_inherent_paras: Default::default(),
 			elastic_paras: Default::default(),
 			code_upgrade: None,
-			fill_claimqueue: true,
 			unavailable_cores: vec![],
 			candidate_descriptor_v2: false,
 			candidate_modifier: None,
@@ -322,13 +322,6 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
 		self.max_validators() / self.max_validators_per_core()
 	}
 
-	/// Set whether the claim queue should be filled.
-	#[cfg(not(feature = "runtime-benchmarks"))]
-	pub(crate) fn set_fill_claimqueue(mut self, f: bool) -> Self {
-		self.fill_claimqueue = f;
-		self
-	}
-
 	/// Get the minimum number of validity votes in order for a backed candidate to be included.
 	#[cfg(feature = "runtime-benchmarks")]
 	pub(crate) fn fallback_min_backing_votes() -> u32 {
@@ -340,10 +333,13 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
 		HeadData(vec![0xFF; max_head_size as usize])
 	}
 
-	fn candidate_descriptor_mock(candidate_descriptor_v2: bool) -> CandidateDescriptorV2<T::Hash> {
+	fn candidate_descriptor_mock(
+		para_id: ParaId,
+		candidate_descriptor_v2: bool,
+	) -> CandidateDescriptorV2<T::Hash> {
 		if candidate_descriptor_v2 {
 			CandidateDescriptorV2::new(
-				0.into(),
+				para_id,
 				Default::default(),
 				CoreIndex(200),
 				2,
@@ -356,7 +352,7 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
 		} else {
 			// Convert v1 to v2.
 			CandidateDescriptor::<T::Hash> {
-				para_id: 0.into(),
+				para_id,
 				relay_parent: Default::default(),
 				collator: junk_collator(),
 				persisted_validation_data_hash: Default::default(),
@@ -373,6 +369,7 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
 
 	/// Create a mock of `CandidatePendingAvailability`.
 	fn candidate_availability_mock(
+		para_id: ParaId,
 		group_idx: GroupIndex,
 		core_idx: CoreIndex,
 		candidate_hash: CandidateHash,
@@ -381,15 +378,16 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
 		candidate_descriptor_v2: bool,
 	) -> inclusion::CandidatePendingAvailability<T::Hash, BlockNumberFor<T>> {
 		inclusion::CandidatePendingAvailability::<T::Hash, BlockNumberFor<T>>::new(
-			core_idx,                                                 // core
-			candidate_hash,                                           // hash
-			Self::candidate_descriptor_mock(candidate_descriptor_v2), // candidate descriptor
-			commitments,                                              // commitments
-			availability_votes,                                       // availability votes
-			Default::default(),                                       // backers
-			Zero::zero(),                                             // relay parent
-			One::one(),                                               /* relay chain block this
-			                                                           * was backed in */
+			core_idx,                                                          // core
+			candidate_hash,                                                    // hash
+			Self::candidate_descriptor_mock(para_id, candidate_descriptor_v2), /* candidate descriptor */
+			commitments,                                                       // commitments
+			availability_votes,                                                /* availability
+			                                                                    * votes */
+			Default::default(), // backers
+			Zero::zero(),       // relay parent
+			One::one(),         /* relay chain block this
+			                     * was backed in */
 			group_idx, // backing group
 		)
 	}
@@ -416,6 +414,7 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
 			hrmp_watermark: 0u32.into(),
 		};
 		let candidate_availability = Self::candidate_availability_mock(
+			para_id,
 			group_idx,
 			core_idx,
 			candidate_hash,
@@ -886,14 +885,11 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
 			extra_cores;
 
 		assert!(used_cores <= max_cores);
-		let fill_claimqueue = self.fill_claimqueue;
 
 		// NOTE: there is an n+2 session delay for these actions to take effect.
 		// We are currently in Session 0, so these changes will take effect in Session 2.
 		Self::setup_para_ids(used_cores - extra_cores);
-		configuration::ActiveConfig::<T>::mutate(|c| {
-			c.scheduler_params.num_cores = used_cores as u32;
-		});
+		configuration::Pallet::<T>::set_coretime_cores_unchecked(used_cores as u32).unwrap();
 
 		let validator_ids = generate_validator_pairs::<T>(self.max_validators());
 		let target_session = SessionIndex::from(self.target_session);
@@ -902,7 +898,7 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
 		let bitfields = builder.create_availability_bitfields(
 			&builder.backed_and_concluding_paras,
 			&builder.elastic_paras,
-			used_cores,
+			scheduler::Pallet::<T>::num_availability_cores(),
 		);
 
 		let mut backed_in_inherent = BTreeMap::new();
@@ -930,66 +926,57 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
 
 		assert_eq!(inclusion::PendingAvailability::<T>::iter().count(), used_cores - extra_cores);
 
-		// Mark all the used cores as occupied. We expect that there are
-		// `backed_and_concluding_paras` that are pending availability and that there are
-		// `used_cores - backed_and_concluding_paras ` which are about to be disputed.
-		let now = frame_system::Pallet::<T>::block_number() + One::one();
-
+		// Sanity check that the occupied cores reported by the inclusion module are what we
+		// expect them to be.
 		let mut core_idx = 0u32;
 		let elastic_paras = &builder.elastic_paras;
-		// Assign potentially multiple cores to same parachains,
-		let cores = all_cores
+
+		let mut occupied_cores = inclusion::Pallet::<T>::get_occupied_cores()
+			.map(|(core, candidate)| (core, candidate.candidate_descriptor().para_id()))
+			.collect::<Vec<_>>();
+		occupied_cores.sort_by(|(core_a, _), (core_b, _)| core_a.0.cmp(&core_b.0));
+
+		let mut expected_cores = all_cores
 			.iter()
 			.flat_map(|(para_id, _)| {
 				(0..elastic_paras.get(&para_id).cloned().unwrap_or(1))
 					.map(|_para_local_core_idx| {
-						let ttl = configuration::ActiveConfig::<T>::get().scheduler_params.ttl;
-						// Load an assignment into provider so that one is present to pop
-						let assignment =
-							<T as scheduler::Config>::AssignmentProvider::get_mock_assignment(
-								CoreIndex(core_idx),
-								ParaId::from(*para_id),
-							);
+						let old_core_idx = core_idx;
 						core_idx += 1;
-						CoreOccupied::Paras(ParasEntry::new(assignment, now + ttl))
+						(CoreIndex(old_core_idx), ParaId::from(*para_id))
 					})
-					.collect::<Vec<CoreOccupied<_>>>()
+					.collect::<Vec<_>>()
 			})
-			.collect::<Vec<CoreOccupied<_>>>();
+			.collect::<Vec<_>>();
 
-		scheduler::AvailabilityCores::<T>::set(cores);
+		expected_cores.sort_by(|(core_a, _), (core_b, _)| core_a.0.cmp(&core_b.0));
 
-		core_idx = 0u32;
+		assert_eq!(expected_cores, occupied_cores);
 
 		// We need entries in the claim queue for those:
 		all_cores.append(&mut builder.backed_in_inherent_paras.clone());
 
-		if fill_claimqueue {
-			let cores = all_cores
-				.keys()
-				.flat_map(|para_id| {
-					(0..elastic_paras.get(&para_id).cloned().unwrap_or(1))
-						.map(|_para_local_core_idx| {
-							let ttl = configuration::ActiveConfig::<T>::get().scheduler_params.ttl;
-							// Load an assignment into provider so that one is present to pop
-							let assignment =
-								<T as scheduler::Config>::AssignmentProvider::get_mock_assignment(
-									CoreIndex(core_idx),
-									ParaId::from(*para_id),
-								);
-
-							core_idx += 1;
-							(
-								CoreIndex(core_idx - 1),
-								[ParasEntry::new(assignment, now + ttl)].into(),
-							)
-						})
-						.collect::<Vec<(CoreIndex, VecDeque<ParasEntry<_>>)>>()
-				})
-				.collect::<BTreeMap<CoreIndex, VecDeque<ParasEntry<_>>>>();
+		let mut core_idx = 0u32;
+		let cores = all_cores
+			.keys()
+			.flat_map(|para_id| {
+				(0..elastic_paras.get(&para_id).cloned().unwrap_or(1))
+					.map(|_para_local_core_idx| {
+						// Load an assignment into provider so that one is present to pop
+						let assignment =
+							<T as scheduler::Config>::AssignmentProvider::get_mock_assignment(
+								CoreIndex(core_idx),
+								ParaId::from(*para_id),
+							);
 
-			scheduler::ClaimQueue::<T>::set(cores);
-		}
+						core_idx += 1;
+						(CoreIndex(core_idx - 1), [assignment].into())
+					})
+					.collect::<Vec<(CoreIndex, VecDeque<Assignment>)>>()
+			})
+			.collect::<BTreeMap<CoreIndex, VecDeque<Assignment>>>();
+
+		scheduler::ClaimQueue::<T>::set(cores);
 
 		Bench::<T> {
 			data: ParachainsInherentData {
diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs
index 36888247580e75c51238afc127f6ab81c233bdd1..e5cf7c4d276e8ffc27883f246a18273c41fef4a8 100644
--- a/polkadot/runtime/parachains/src/configuration.rs
+++ b/polkadot/runtime/parachains/src/configuration.rs
@@ -337,8 +337,6 @@ pub enum InconsistentError<BlockNumber> {
 	ZeroMinimumBackingVotes,
 	/// `executor_params` are inconsistent.
 	InconsistentExecutorParams { inner: ExecutorParamError },
-	/// TTL should be bigger than lookahead
-	LookaheadExceedsTTL,
 	/// Lookahead is zero, while it must be at least 1 for parachains to work.
 	LookaheadZero,
 	/// Passed in queue size for on-demand was too large.
@@ -434,10 +432,6 @@ where
 			return Err(InconsistentExecutorParams { inner })
 		}
 
-		if self.scheduler_params.ttl < self.scheduler_params.lookahead.into() {
-			return Err(LookaheadExceedsTTL)
-		}
-
 		if self.scheduler_params.lookahead == 0 {
 			return Err(LookaheadZero)
 		}
@@ -686,18 +680,7 @@ pub mod pallet {
 			Self::set_coretime_cores_unchecked(new)
 		}
 
-		/// Set the max number of times a claim may timeout on a core before it is abandoned
-		#[pallet::call_index(7)]
-		#[pallet::weight((
-			T::WeightInfo::set_config_with_u32(),
-			DispatchClass::Operational,
-		))]
-		pub fn set_max_availability_timeouts(origin: OriginFor<T>, new: u32) -> DispatchResult {
-			ensure_root(origin)?;
-			Self::schedule_config_update(|config| {
-				config.scheduler_params.max_availability_timeouts = new;
-			})
-		}
+		// Call index 7 used to be `set_max_availability_timeouts`, which was removed.
 
 		/// Set the parachain validator-group rotation frequency
 		#[pallet::call_index(8)]
@@ -1193,18 +1176,8 @@ pub mod pallet {
 				config.scheduler_params.on_demand_target_queue_utilization = new;
 			})
 		}
-		/// Set the on demand (parathreads) ttl in the claimqueue.
-		#[pallet::call_index(51)]
-		#[pallet::weight((
-			T::WeightInfo::set_config_with_block_number(),
-			DispatchClass::Operational
-		))]
-		pub fn set_on_demand_ttl(origin: OriginFor<T>, new: BlockNumberFor<T>) -> DispatchResult {
-			ensure_root(origin)?;
-			Self::schedule_config_update(|config| {
-				config.scheduler_params.ttl = new;
-			})
-		}
+
+		// Call index 51 used to be `set_on_demand_ttl`, which was removed.
 
 		/// Set the minimum backing votes threshold.
 		#[pallet::call_index(52)]
diff --git a/polkadot/runtime/parachains/src/configuration/migration/v12.rs b/polkadot/runtime/parachains/src/configuration/migration/v12.rs
index 111b1a1999661aab11ecbe9746a3930061c88e1b..d1e0cf10a0ff75b8011b3c853f1d591b4294f30c 100644
--- a/polkadot/runtime/parachains/src/configuration/migration/v12.rs
+++ b/polkadot/runtime/parachains/src/configuration/migration/v12.rs
@@ -143,6 +143,7 @@ fn migrate_to_v12<T: Config>() -> Weight {
 					minimum_backing_votes                    : pre.minimum_backing_votes,
 					node_features                            : pre.node_features,
 					approval_voting_params                   : pre.approval_voting_params,
+					#[allow(deprecated)]
 					scheduler_params: SchedulerParams {
 							group_rotation_frequency             : pre.group_rotation_frequency,
 							paras_availability_period            : pre.paras_availability_period,
@@ -231,7 +232,10 @@ mod tests {
 		assert_eq!(v12.scheduler_params.paras_availability_period, 4);
 		assert_eq!(v12.scheduler_params.lookahead, 1);
 		assert_eq!(v12.scheduler_params.num_cores, 1);
-		assert_eq!(v12.scheduler_params.max_availability_timeouts, 0);
+		#[allow(deprecated)]
+		{
+			assert_eq!(v12.scheduler_params.max_availability_timeouts, 0);
+		}
 		assert_eq!(v12.scheduler_params.on_demand_queue_max_size, 10_000);
 		assert_eq!(
 			v12.scheduler_params.on_demand_target_queue_utilization,
@@ -239,7 +243,10 @@ mod tests {
 		);
 		assert_eq!(v12.scheduler_params.on_demand_fee_variability, Perbill::from_percent(3));
 		assert_eq!(v12.scheduler_params.on_demand_base_fee, 10_000_000);
-		assert_eq!(v12.scheduler_params.ttl, 5);
+		#[allow(deprecated)]
+		{
+			assert_eq!(v12.scheduler_params.ttl, 5);
+		}
 	}
 
 	#[test]
@@ -282,6 +289,7 @@ mod tests {
 
 			for (_, v12) in configs_to_check {
 				#[rustfmt::skip]
+				#[allow(deprecated)]
 				{
 					assert_eq!(v11.max_code_size                            , v12.max_code_size);
 					assert_eq!(v11.max_head_data_size                       , v12.max_head_data_size);
diff --git a/polkadot/runtime/parachains/src/configuration/tests.rs b/polkadot/runtime/parachains/src/configuration/tests.rs
index 0d20399e471ba7b66f93e0f46dfe7f1a3677ef84..a8689a04fe0416ee561c7b16924cab2d7c19c051 100644
--- a/polkadot/runtime/parachains/src/configuration/tests.rs
+++ b/polkadot/runtime/parachains/src/configuration/tests.rs
@@ -316,13 +316,14 @@ fn setting_pending_config_members() {
 			approval_voting_params: ApprovalVotingParams { max_approval_coalesce_count: 1 },
 			minimum_backing_votes: 5,
 			node_features: bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1],
+			#[allow(deprecated)]
 			scheduler_params: SchedulerParams {
 				group_rotation_frequency: 20,
 				paras_availability_period: 10,
 				max_validators_per_core: None,
 				lookahead: 3,
 				num_cores: 2,
-				max_availability_timeouts: 5,
+				max_availability_timeouts: 0,
 				on_demand_queue_max_size: 10_000u32,
 				on_demand_base_fee: 10_000_000u128,
 				on_demand_fee_variability: Perbill::from_percent(3),
@@ -355,11 +356,6 @@ fn setting_pending_config_members() {
 			new_config.scheduler_params.num_cores,
 		)
 		.unwrap();
-		Configuration::set_max_availability_timeouts(
-			RuntimeOrigin::root(),
-			new_config.scheduler_params.max_availability_timeouts,
-		)
-		.unwrap();
 		Configuration::set_group_rotation_frequency(
 			RuntimeOrigin::root(),
 			new_config.scheduler_params.group_rotation_frequency,
diff --git a/polkadot/runtime/parachains/src/coretime/migration.rs b/polkadot/runtime/parachains/src/coretime/migration.rs
index d4be135aad65767fdccf8aea785019940e81c2bb..52189be3d247bc57d32260cfdc1133ca28337b88 100644
--- a/polkadot/runtime/parachains/src/coretime/migration.rs
+++ b/polkadot/runtime/parachains/src/coretime/migration.rs
@@ -19,8 +19,6 @@
 pub use v_coretime::{GetLegacyLease, MigrateToCoretime};
 
 mod v_coretime {
-	#[cfg(feature = "try-runtime")]
-	use crate::scheduler::common::AssignmentProvider;
 	use crate::{
 		assigner_coretime, configuration,
 		coretime::{mk_coretime_call, Config, PartsOf57600, WeightInfo},
@@ -142,7 +140,8 @@ mod v_coretime {
 
 			let dmp_queue_size =
 				crate::dmp::Pallet::<T>::dmq_contents(T::BrokerId::get().into()).len() as u32;
-			let new_core_count = assigner_coretime::Pallet::<T>::session_core_count();
+			let config = configuration::ActiveConfig::<T>::get();
+			let new_core_count = config.scheduler_params.num_cores;
 			ensure!(new_core_count == prev_core_count, "Total number of cores need to not change.");
 			ensure!(
 				dmp_queue_size > prev_dmp_queue_size,
diff --git a/polkadot/runtime/parachains/src/disputes.rs b/polkadot/runtime/parachains/src/disputes.rs
index f86573dadf562fee7b4cb63190399cbe390a4865..d5a3f31e5943f26f784f9b4e58026f8213bdf12f 100644
--- a/polkadot/runtime/parachains/src/disputes.rs
+++ b/polkadot/runtime/parachains/src/disputes.rs
@@ -1309,3 +1309,11 @@ fn check_signature(
 
 	res
 }
+
+#[cfg(all(not(feature = "runtime-benchmarks"), test))]
+// Test helper for clearing the on-chain dispute data.
+pub(crate) fn clear_dispute_storage<T: Config>() {
+	let _ = Disputes::<T>::clear(u32::MAX, None);
+	let _ = BackersOnDisputes::<T>::clear(u32::MAX, None);
+	let _ = Included::<T>::clear(u32::MAX, None);
+}
diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs
index 36f874b8db1e9bb015657cc7b2628e64ee75443b..ea3a5d3cdda9f4f5881fa9c2470cb20e3f256332 100644
--- a/polkadot/runtime/parachains/src/inclusion/mod.rs
+++ b/polkadot/runtime/parachains/src/inclusion/mod.rs
@@ -50,7 +50,7 @@ use polkadot_primitives::{
 		CandidateReceiptV2 as CandidateReceipt,
 		CommittedCandidateReceiptV2 as CommittedCandidateReceipt,
 	},
-	well_known_keys, CandidateCommitments, CandidateHash, CoreIndex, GroupIndex, Hash, HeadData,
+	well_known_keys, CandidateCommitments, CandidateHash, CoreIndex, GroupIndex, HeadData,
 	Id as ParaId, SignedAvailabilityBitfields, SigningContext, UpwardMessage, ValidatorId,
 	ValidatorIndex, ValidityAttestation,
 };
@@ -161,16 +161,6 @@ impl<H, N> CandidatePendingAvailability<H, N> {
 		self.relay_parent_number.clone()
 	}
 
-	/// Get the candidate backing group.
-	pub(crate) fn backing_group(&self) -> GroupIndex {
-		self.backing_group
-	}
-
-	/// Get the candidate's backers.
-	pub(crate) fn backers(&self) -> &BitVec<u8, BitOrderLsb0> {
-		&self.backers
-	}
-
 	#[cfg(any(feature = "runtime-benchmarks", test))]
 	pub(crate) fn new(
 		core: CoreIndex,
@@ -207,24 +197,6 @@ pub trait RewardValidators {
 	fn reward_bitfields(validators: impl IntoIterator<Item = ValidatorIndex>);
 }
 
-/// Helper return type for `process_candidates`.
-#[derive(Encode, Decode, PartialEq, TypeInfo)]
-#[cfg_attr(test, derive(Debug))]
-pub(crate) struct ProcessedCandidates<H = Hash> {
-	pub(crate) core_indices: Vec<(CoreIndex, ParaId)>,
-	pub(crate) candidate_receipt_with_backing_validator_indices:
-		Vec<(CandidateReceipt<H>, Vec<(ValidatorIndex, ValidityAttestation)>)>,
-}
-
-impl<H> Default for ProcessedCandidates<H> {
-	fn default() -> Self {
-		Self {
-			core_indices: Vec::new(),
-			candidate_receipt_with_backing_validator_indices: Vec::new(),
-		}
-	}
-}
-
 /// Reads the footprint of queues for a specific origin type.
 pub trait QueueFootprinter {
 	type Origin;
@@ -514,6 +486,14 @@ impl<T: Config> Pallet<T> {
 		T::MessageQueue::sweep_queue(AggregateMessageOrigin::Ump(UmpQueueId::Para(para)));
 	}
 
+	pub(crate) fn get_occupied_cores(
+	) -> impl Iterator<Item = (CoreIndex, CandidatePendingAvailability<T::Hash, BlockNumberFor<T>>)>
+	{
+		PendingAvailability::<T>::iter_values().flat_map(|pending_candidates| {
+			pending_candidates.into_iter().map(|c| (c.core, c.clone()))
+		})
+	}
+
 	/// Extract the freed cores based on cores that became available.
 	///
 	/// Bitfields are expected to have been sanitized already. E.g. via `sanitize_bitfields`!
@@ -640,12 +620,15 @@ impl<T: Config> Pallet<T> {
 		candidates: &BTreeMap<ParaId, Vec<(BackedCandidate<T::Hash>, CoreIndex)>>,
 		group_validators: GV,
 		core_index_enabled: bool,
-	) -> Result<ProcessedCandidates<T::Hash>, DispatchError>
+	) -> Result<
+		Vec<(CandidateReceipt<T::Hash>, Vec<(ValidatorIndex, ValidityAttestation)>)>,
+		DispatchError,
+	>
 	where
 		GV: Fn(GroupIndex) -> Option<Vec<ValidatorIndex>>,
 	{
 		if candidates.is_empty() {
-			return Ok(ProcessedCandidates::default())
+			return Ok(Default::default())
 		}
 
 		let now = frame_system::Pallet::<T>::block_number();
@@ -654,7 +637,6 @@ impl<T: Config> Pallet<T> {
 		// Collect candidate receipts with backers.
 		let mut candidate_receipt_with_backing_validator_indices =
 			Vec::with_capacity(candidates.len());
-		let mut core_indices = Vec::with_capacity(candidates.len());
 
 		for (para_id, para_candidates) in candidates {
 			let mut latest_head_data = match Self::para_latest_head_data(para_id) {
@@ -708,7 +690,6 @@ impl<T: Config> Pallet<T> {
 				latest_head_data = candidate.candidate().commitments.head_data.clone();
 				candidate_receipt_with_backing_validator_indices
 					.push((candidate.receipt(), backer_idx_and_attestation));
-				core_indices.push((*core, *para_id));
 
 				// Update storage now
 				PendingAvailability::<T>::mutate(&para_id, |pending_availability| {
@@ -743,10 +724,7 @@ impl<T: Config> Pallet<T> {
 			}
 		}
 
-		Ok(ProcessedCandidates::<T::Hash> {
-			core_indices,
-			candidate_receipt_with_backing_validator_indices,
-		})
+		Ok(candidate_receipt_with_backing_validator_indices)
 	}
 
 	// Get the latest backed output head data of this para (including pending availability).
@@ -1173,7 +1151,9 @@ impl<T: Config> Pallet<T> {
 
 	/// Returns the first `CommittedCandidateReceipt` pending availability for the para provided, if
 	/// any.
-	pub(crate) fn candidate_pending_availability(
+	/// A para_id could have more than one candidate pending availability, if it's using elastic
+	/// scaling. These candidates form a chain. This function returns the first in the chain.
+	pub(crate) fn first_candidate_pending_availability(
 		para: ParaId,
 	) -> Option<CommittedCandidateReceipt<T::Hash>> {
 		PendingAvailability::<T>::get(&para).and_then(|p| {
@@ -1201,24 +1181,6 @@ impl<T: Config> Pallet<T> {
 			})
 			.unwrap_or_default()
 	}
-
-	/// Returns the metadata around the first candidate pending availability for the
-	/// para provided, if any.
-	pub(crate) fn pending_availability(
-		para: ParaId,
-	) -> Option<CandidatePendingAvailability<T::Hash, BlockNumberFor<T>>> {
-		PendingAvailability::<T>::get(&para).and_then(|p| p.get(0).cloned())
-	}
-
-	/// Returns the metadata around the candidate pending availability occupying the supplied core,
-	/// if any.
-	pub(crate) fn pending_availability_with_core(
-		para: ParaId,
-		core: CoreIndex,
-	) -> Option<CandidatePendingAvailability<T::Hash, BlockNumberFor<T>>> {
-		PendingAvailability::<T>::get(&para)
-			.and_then(|p| p.iter().find(|c| c.core == core).cloned())
-	}
 }
 
 const fn availability_threshold(n_validators: usize) -> usize {
diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs
index 87d21e209a49e5141aba6a7edf30e08e4addc573..188ba4995d8302044679cb3bcc3d66c9508ce785 100644
--- a/polkadot/runtime/parachains/src/inclusion/tests.rs
+++ b/polkadot/runtime/parachains/src/inclusion/tests.rs
@@ -1267,7 +1267,7 @@ fn candidate_checks() {
 				&group_validators,
 				false
 			),
-			Ok(ProcessedCandidates::default())
+			Ok(Default::default())
 		);
 
 		// Check candidate ordering
@@ -1563,20 +1563,16 @@ fn candidate_checks() {
 				None,
 			);
 
-			let ProcessedCandidates {
-				core_indices: occupied_cores,
-				candidate_receipt_with_backing_validator_indices,
-			} = ParaInclusion::process_candidates(
-				&allowed_relay_parents,
-				&vec![(thread_a_assignment.0, vec![(backed.clone(), thread_a_assignment.1)])]
-					.into_iter()
-					.collect(),
-				&group_validators,
-				false,
-			)
-			.expect("candidate is accepted with bad collator signature");
-
-			assert_eq!(occupied_cores, vec![(CoreIndex::from(2), thread_a)]);
+			let candidate_receipt_with_backing_validator_indices =
+				ParaInclusion::process_candidates(
+					&allowed_relay_parents,
+					&vec![(thread_a_assignment.0, vec![(backed.clone(), thread_a_assignment.1)])]
+						.into_iter()
+						.collect(),
+					&group_validators,
+					false,
+				)
+				.expect("candidate is accepted with bad collator signature");
 
 			let mut expected = std::collections::HashMap::<
 				CandidateHash,
@@ -1924,10 +1920,7 @@ fn backing_works() {
 			}
 		};
 
-		let ProcessedCandidates {
-			core_indices: occupied_cores,
-			candidate_receipt_with_backing_validator_indices,
-		} = ParaInclusion::process_candidates(
+		let candidate_receipt_with_backing_validator_indices = ParaInclusion::process_candidates(
 			&allowed_relay_parents,
 			&backed_candidates,
 			&group_validators,
@@ -1935,15 +1928,6 @@ fn backing_works() {
 		)
 		.expect("candidates scheduled, in order, and backed");
 
-		assert_eq!(
-			occupied_cores,
-			vec![
-				(CoreIndex::from(0), chain_a),
-				(CoreIndex::from(1), chain_b),
-				(CoreIndex::from(2), thread_a)
-			]
-		);
-
 		// Transform the votes into the setup we expect
 		let expected = {
 			let mut intermediate = std::collections::HashMap::<
@@ -2224,10 +2208,7 @@ fn backing_works_with_elastic_scaling_mvp() {
 			}
 		};
 
-		let ProcessedCandidates {
-			core_indices: occupied_cores,
-			candidate_receipt_with_backing_validator_indices,
-		} = ParaInclusion::process_candidates(
+		let candidate_receipt_with_backing_validator_indices = ParaInclusion::process_candidates(
 			&allowed_relay_parents,
 			&backed_candidates,
 			&group_validators,
@@ -2235,16 +2216,6 @@ fn backing_works_with_elastic_scaling_mvp() {
 		)
 		.expect("candidates scheduled, in order, and backed");
 
-		// Both b candidates will be backed.
-		assert_eq!(
-			occupied_cores,
-			vec![
-				(CoreIndex::from(0), chain_a),
-				(CoreIndex::from(1), chain_b),
-				(CoreIndex::from(2), chain_b),
-			]
-		);
-
 		// Transform the votes into the setup we expect
 		let mut expected = std::collections::HashMap::<
 			CandidateHash,
@@ -2420,18 +2391,15 @@ fn can_include_candidate_with_ok_code_upgrade() {
 			None,
 		);
 
-		let ProcessedCandidates { core_indices: occupied_cores, .. } =
-			ParaInclusion::process_candidates(
-				&allowed_relay_parents,
-				&vec![(chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)])]
-					.into_iter()
-					.collect::<BTreeMap<_, _>>(),
-				group_validators,
-				false,
-			)
-			.expect("candidates scheduled, in order, and backed");
-
-		assert_eq!(occupied_cores, vec![(CoreIndex::from(0), chain_a)]);
+		let _ = ParaInclusion::process_candidates(
+			&allowed_relay_parents,
+			&vec![(chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)])]
+				.into_iter()
+				.collect::<BTreeMap<_, _>>(),
+			group_validators,
+			false,
+		)
+		.expect("candidates scheduled, in order, and backed");
 
 		let backers = {
 			let num_backers = effective_minimum_backing_votes(
@@ -2846,7 +2814,7 @@ fn para_upgrade_delay_scheduled_from_inclusion() {
 			None,
 		);
 
-		let ProcessedCandidates { core_indices: occupied_cores, .. } =
+		let _ =
 			ParaInclusion::process_candidates(
 				&allowed_relay_parents,
 				&vec![(chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)])]
@@ -2857,8 +2825,6 @@ fn para_upgrade_delay_scheduled_from_inclusion() {
 			)
 			.expect("candidates scheduled, in order, and backed");
 
-		assert_eq!(occupied_cores, vec![(CoreIndex::from(0), chain_a)]);
-
 		// Run a couple of blocks before the inclusion.
 		run_to_block(7, |_| None);
 
diff --git a/polkadot/runtime/parachains/src/initializer.rs b/polkadot/runtime/parachains/src/initializer.rs
index 340f727097b58245d9fa9a2553f9e178544bcea1..6ee245fb5230c109ecf64c2615ea1df052eb79f9 100644
--- a/polkadot/runtime/parachains/src/initializer.rs
+++ b/polkadot/runtime/parachains/src/initializer.rs
@@ -87,10 +87,10 @@ impl<BlockNumber: Default + From<u32>> Default for SessionChangeNotification<Blo
 }
 
 #[derive(Encode, Decode, TypeInfo)]
-struct BufferedSessionChange {
-	validators: Vec<ValidatorId>,
-	queued: Vec<ValidatorId>,
-	session_index: SessionIndex,
+pub(crate) struct BufferedSessionChange {
+	pub validators: Vec<ValidatorId>,
+	pub queued: Vec<ValidatorId>,
+	pub session_index: SessionIndex,
 }
 
 pub trait WeightInfo {
@@ -149,7 +149,7 @@ pub mod pallet {
 	#[pallet::storage]
 	pub(super) type HasInitialized<T: Config> = StorageValue<_, ()>;
 
-	/// Buffered session changes along with the block number at which they should be applied.
+	/// Buffered session changes.
 	///
 	/// Typically this will be empty or one element long. Apart from that this item never hits
 	/// the storage.
@@ -157,7 +157,7 @@ pub mod pallet {
 	/// However this is a `Vec` regardless to handle various edge cases that may occur at runtime
 	/// upgrade boundaries or if governance intervenes.
 	#[pallet::storage]
-	pub(super) type BufferedSessionChanges<T: Config> =
+	pub(crate) type BufferedSessionChanges<T: Config> =
 		StorageValue<_, Vec<BufferedSessionChange>, ValueQuery>;
 
 	#[pallet::hooks]
@@ -254,9 +254,6 @@ impl<T: Config> Pallet<T> {
 			buf
 		};
 
-		// inform about upcoming new session
-		scheduler::Pallet::<T>::pre_new_session();
-
 		let configuration::SessionChangeOutcome { prev_config, new_config } =
 			configuration::Pallet::<T>::initializer_on_new_session(&session_index);
 		let new_config = new_config.unwrap_or_else(|| prev_config.clone());
@@ -328,6 +325,11 @@ impl<T: Config> Pallet<T> {
 	{
 		Self::on_new_session(changed, session_index, validators, queued)
 	}
+
+	/// Return whether at the end of this block a new session will be initialized.
+	pub(crate) fn upcoming_session_change() -> bool {
+		!BufferedSessionChanges::<T>::get().is_empty()
+	}
 }
 
 impl<T: Config> sp_runtime::BoundToRuntimeAppPublic for Pallet<T> {
diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs
index 80751a2b7a0297386a33db742a8d88cc46980db1..c23918708b21f009d9f508c119106818f38ce4a8 100644
--- a/polkadot/runtime/parachains/src/mock.rs
+++ b/polkadot/runtime/parachains/src/mock.rs
@@ -520,9 +520,6 @@ pub mod mock_assigner {
 		#[pallet::storage]
 		pub(super) type MockAssignmentQueue<T: Config> =
 			StorageValue<_, VecDeque<Assignment>, ValueQuery>;
-
-		#[pallet::storage]
-		pub(super) type MockCoreCount<T: Config> = StorageValue<_, u32, OptionQuery>;
 	}
 
 	impl<T: Config> Pallet<T> {
@@ -531,12 +528,6 @@ pub mod mock_assigner {
 		pub fn add_test_assignment(assignment: Assignment) {
 			MockAssignmentQueue::<T>::mutate(|queue| queue.push_back(assignment));
 		}
-
-		// Allows for customized core count in scheduler tests, rather than a core count
-		// derived from on-demand config + parachain count.
-		pub fn set_core_count(count: u32) {
-			MockCoreCount::<T>::set(Some(count));
-		}
 	}
 
 	impl<T: Config> AssignmentProvider<BlockNumber> for Pallet<T> {
@@ -554,20 +545,18 @@ pub mod mock_assigner {
 		}
 
 		// We don't care about core affinity in the test assigner
-		fn report_processed(_assignment: Assignment) {}
+		fn report_processed(_: Assignment) {}
 
-		// The results of this are tested in on_demand tests. No need to represent it
-		// in the mock assigner.
-		fn push_back_assignment(_assignment: Assignment) {}
+		fn push_back_assignment(assignment: Assignment) {
+			Self::add_test_assignment(assignment);
+		}
 
 		#[cfg(any(feature = "runtime-benchmarks", test))]
 		fn get_mock_assignment(_: CoreIndex, para_id: ParaId) -> Assignment {
 			Assignment::Bulk(para_id)
 		}
 
-		fn session_core_count() -> u32 {
-			MockCoreCount::<T>::get().unwrap_or(5)
-		}
+		fn assignment_duplicated(_: &Assignment) {}
 	}
 }
 
diff --git a/polkadot/runtime/parachains/src/on_demand/mod.rs b/polkadot/runtime/parachains/src/on_demand/mod.rs
index dc046c194fd0b114ba0407878dea05d497be1f1b..66400eb00fd9d7a64ed2ffd660ac0e0f7c3e9027 100644
--- a/polkadot/runtime/parachains/src/on_demand/mod.rs
+++ b/polkadot/runtime/parachains/src/on_demand/mod.rs
@@ -317,6 +317,11 @@ where
 		Some(assignment)
 	}
 
+	/// Report that an assignment was duplicated by the scheduler.
+	pub fn assignment_duplicated(para_id: ParaId, core_index: CoreIndex) {
+		Pallet::<T>::increase_affinity(para_id, core_index);
+	}
+
 	/// Report that the `para_id` & `core_index` combination was processed.
 	///
 	/// This should be called once it is clear that the assignment won't get pushed back anymore.
diff --git a/polkadot/runtime/parachains/src/on_demand/tests.rs b/polkadot/runtime/parachains/src/on_demand/tests.rs
index 9742954118103b5b46895dde5fc7456a59bf2877..7da16942c7ad6989a2188fe749c947141f073ee0 100644
--- a/polkadot/runtime/parachains/src/on_demand/tests.rs
+++ b/polkadot/runtime/parachains/src/on_demand/tests.rs
@@ -30,7 +30,6 @@ use crate::{
 	},
 	paras::{ParaGenesisArgs, ParaKind},
 };
-use alloc::collections::btree_map::BTreeMap;
 use core::cmp::{Ord, Ordering};
 use frame_support::{assert_noop, assert_ok};
 use pallet_balances::Error as BalancesError;
@@ -86,7 +85,7 @@ fn run_to_block(
 		OnDemand::on_initialize(b + 1);
 
 		// In the real runtime this is expected to be called by the `InclusionInherent` pallet.
-		Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1);
+		Scheduler::advance_claim_queue(&Default::default());
 	}
 }
 
diff --git a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs
index 266860061bed8830311237721deda12b2b0e9356..485e7211c1d2f27581e8e9524ed0ca108d6199d1 100644
--- a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs
+++ b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs
@@ -43,7 +43,8 @@ benchmarks! {
 	// Variant over `v`, the number of dispute statements in a dispute statement set. This gives the
 	// weight of a single dispute statement set.
 	enter_variable_disputes {
-		let v in 10..BenchBuilder::<T>::fallback_max_validators();
+		// The number of statements needs to be at least a third of the validator set size.
+		let v in 400..BenchBuilder::<T>::fallback_max_validators();
 
 		let scenario = BenchBuilder::<T>::new()
 			.set_dispute_sessions(&[2])
diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs
index 2aca0f2c728a8bf8b962d116de1b572af0fe42d9..4c1394fd1347395371a581619d56a70df1bc9c73 100644
--- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs
+++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs
@@ -27,8 +27,7 @@ use crate::{
 	inclusion::{self, CandidateCheckContext},
 	initializer,
 	metrics::METRICS,
-	paras,
-	scheduler::{self, FreedReason},
+	paras, scheduler,
 	shared::{self, AllowedRelayParentsTracker},
 	ParaId,
 };
@@ -38,6 +37,7 @@ use alloc::{
 	vec::Vec,
 };
 use bitvec::prelude::BitVec;
+use core::result::Result;
 use frame_support::{
 	defensive,
 	dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo},
@@ -105,15 +105,6 @@ impl DisputedBitfield {
 	}
 }
 
-/// The context in which the inherent data is checked or processed.
-#[derive(PartialEq)]
-pub enum ProcessInherentDataContext {
-	/// Enables filtering/limits weight of inherent up to maximum block weight.
-	/// Invariant: InherentWeight <= BlockWeight.
-	ProvideInherent,
-	/// Checks the InherentWeight invariant.
-	Enter,
-}
 pub use pallet::*;
 
 #[frame_support::pallet]
@@ -140,11 +131,9 @@ pub mod pallet {
 		/// The hash of the submitted parent header doesn't correspond to the saved block hash of
 		/// the parent.
 		InvalidParentHeader,
-		/// The data given to the inherent will result in an overweight block.
-		InherentOverweight,
-		/// A candidate was filtered during inherent execution. This should have only been done
+		/// Inherent data was filtered during execution. This should have only been done
 		/// during creation.
-		CandidatesFilteredDuringExecution,
+		InherentDataFilteredDuringExecution,
 		/// Too many candidates supplied.
 		UnscheduledCandidate,
 	}
@@ -253,9 +242,12 @@ pub mod pallet {
 
 			ensure!(!Included::<T>::exists(), Error::<T>::TooManyInclusionInherents);
 			Included::<T>::set(Some(()));
+			let initial_data = data.clone();
 
-			Self::process_inherent_data(data, ProcessInherentDataContext::Enter)
-				.map(|(_processed, post_info)| post_info)
+			Self::process_inherent_data(data).and_then(|(processed, post_info)| {
+				ensure!(initial_data == processed, Error::<T>::InherentDataFilteredDuringExecution);
+				Ok(post_info)
+			})
 		}
 	}
 }
@@ -273,10 +265,7 @@ impl<T: Config> Pallet<T> {
 				return None
 			},
 		};
-		match Self::process_inherent_data(
-			parachains_inherent_data,
-			ProcessInherentDataContext::ProvideInherent,
-		) {
+		match Self::process_inherent_data(parachains_inherent_data) {
 			Ok((processed, _)) => Some(processed),
 			Err(err) => {
 				log::warn!(target: LOG_TARGET, "Processing inherent data failed: {:?}", err);
@@ -290,21 +279,12 @@ impl<T: Config> Pallet<T> {
 	/// The given inherent data is processed and state is altered accordingly. If any data could
 	/// not be applied (inconsistencies, weight limit, ...) it is removed.
 	///
-	/// When called from `create_inherent` the `context` must be set to
-	/// `ProcessInherentDataContext::ProvideInherent` so it guarantees the invariant that inherent
-	/// is not overweight.
-	/// It is **mandatory** that calls from `enter` set `context` to
-	/// `ProcessInherentDataContext::Enter` to ensure the weight invariant is checked.
-	///
 	/// Returns: Result containing processed inherent data and weight, the processed inherent would
 	/// consume.
 	fn process_inherent_data(
 		data: ParachainsInherentData<HeaderFor<T>>,
-		context: ProcessInherentDataContext,
-	) -> core::result::Result<
-		(ParachainsInherentData<HeaderFor<T>>, PostDispatchInfo),
-		DispatchErrorWithPostInfo,
-	> {
+	) -> Result<(ParachainsInherentData<HeaderFor<T>>, PostDispatchInfo), DispatchErrorWithPostInfo>
+	{
 		#[cfg(feature = "runtime-metrics")]
 		sp_io::init_tracing();
 
@@ -333,6 +313,27 @@ impl<T: Config> Pallet<T> {
 		let now = frame_system::Pallet::<T>::block_number();
 		let config = configuration::ActiveConfig::<T>::get();
 
+		// Before anything else, update the allowed relay-parents.
+		{
+			let parent_number = now - One::one();
+			let parent_storage_root = *parent_header.state_root();
+
+			shared::AllowedRelayParents::<T>::mutate(|tracker| {
+				tracker.update(
+					parent_hash,
+					parent_storage_root,
+					scheduler::ClaimQueue::<T>::get()
+						.into_iter()
+						.map(|(core_index, paras)| {
+							(core_index, paras.into_iter().map(|e| e.para_id()).collect())
+						})
+						.collect(),
+					parent_number,
+					config.async_backing_params.allowed_ancestry_len,
+				);
+			});
+		}
+
 		let candidates_weight = backed_candidates_weight::<T>(&backed_candidates);
 		let bitfields_weight = signed_bitfields_weight::<T>(&bitfields);
 		let disputes_weight = multi_dispute_statement_sets_weight::<T>(&disputes);
@@ -345,7 +346,7 @@ impl<T: Config> Pallet<T> {
 		log::debug!(target: LOG_TARGET, "Time weight before filter: {}, candidates + bitfields: {}, disputes: {}", weight_before_filtering.ref_time(), candidates_weight.ref_time() + bitfields_weight.ref_time(), disputes_weight.ref_time());
 
 		let current_session = shared::CurrentSessionIndex::<T>::get();
-		let expected_bits = scheduler::AvailabilityCores::<T>::get().len();
+		let expected_bits = scheduler::Pallet::<T>::num_availability_cores();
 		let validator_public = shared::ActiveValidatorKeys::<T>::get();
 
 		// We are assuming (incorrectly) to have all the weight (for the mandatory class or even
@@ -390,7 +391,7 @@ impl<T: Config> Pallet<T> {
 			T::DisputesHandler::filter_dispute_data(set, post_conclusion_acceptance_period)
 		};
 
-		// Limit the disputes first, since the following statements depend on the votes include
+		// Limit the disputes first, since the following statements depend on the votes included
 		// here.
 		let (checked_disputes_sets, checked_disputes_sets_consumed_weight) =
 			limit_and_sanitize_disputes::<T, _>(
@@ -399,7 +400,7 @@ impl<T: Config> Pallet<T> {
 				max_block_weight,
 			);
 
-		let mut all_weight_after = if context == ProcessInherentDataContext::ProvideInherent {
+		let mut all_weight_after = {
 			// Assure the maximum block weight is adhered, by limiting bitfields and backed
 			// candidates. Dispute statement sets were already limited before.
 			let non_disputes_weight = apply_weight_limit::<T>(
@@ -427,23 +428,6 @@ impl<T: Config> Pallet<T> {
 				log::warn!(target: LOG_TARGET, "Post weight limiting weight is still too large, time: {}, size: {}", all_weight_after.ref_time(), all_weight_after.proof_size());
 			}
 			all_weight_after
-		} else {
-			// This check is performed in the context of block execution. Ensures inherent weight
-			// invariants guaranteed by `create_inherent_data` for block authorship.
-			if weight_before_filtering.any_gt(max_block_weight) {
-				log::error!(
-					"Overweight para inherent data reached the runtime {:?}: {} > {}",
-					parent_hash,
-					weight_before_filtering,
-					max_block_weight
-				);
-			}
-
-			ensure!(
-				weight_before_filtering.all_lte(max_block_weight),
-				Error::<T>::InherentOverweight
-			);
-			weight_before_filtering
 		};
 
 		// Note that `process_checked_multi_dispute_data` will iterate and import each
@@ -567,98 +551,9 @@ impl<T: Config> Pallet<T> {
 			log::debug!(target: LOG_TARGET, "Evicted timed out cores: {:?}", freed_timeout);
 		}
 
-		// We'll schedule paras again, given freed cores, and reasons for freeing.
-		let freed = freed_concluded
-			.into_iter()
-			.map(|(c, _hash)| (c, FreedReason::Concluded))
-			.chain(freed_disputed.into_iter().map(|core| (core, FreedReason::Concluded)))
-			.chain(freed_timeout.into_iter().map(|c| (c, FreedReason::TimedOut)))
-			.collect::<BTreeMap<CoreIndex, FreedReason>>();
-		scheduler::Pallet::<T>::free_cores_and_fill_claim_queue(freed, now);
-
-		METRICS.on_candidates_processed_total(backed_candidates.len() as u64);
-
-		// After freeing cores and filling claims, but before processing backed candidates
-		// we update the allowed relay-parents.
-		{
-			let parent_number = now - One::one();
-			let parent_storage_root = *parent_header.state_root();
-
-			shared::AllowedRelayParents::<T>::mutate(|tracker| {
-				tracker.update(
-					parent_hash,
-					parent_storage_root,
-					scheduler::ClaimQueue::<T>::get()
-						.into_iter()
-						.map(|(core_index, paras)| {
-							(core_index, paras.into_iter().map(|e| e.para_id()).collect())
-						})
-						.collect(),
-					parent_number,
-					config.async_backing_params.allowed_ancestry_len,
-				);
-			});
-		}
-		let allowed_relay_parents = shared::AllowedRelayParents::<T>::get();
-
-		let core_index_enabled = configuration::ActiveConfig::<T>::get()
-			.node_features
-			.get(FeatureIndex::ElasticScalingMVP as usize)
-			.map(|b| *b)
-			.unwrap_or(false);
-
-		let allow_v2_receipts = configuration::ActiveConfig::<T>::get()
-			.node_features
-			.get(FeatureIndex::CandidateReceiptV2 as usize)
-			.map(|b| *b)
-			.unwrap_or(false);
-
-		let mut eligible: BTreeMap<ParaId, BTreeSet<CoreIndex>> = BTreeMap::new();
-		let mut total_eligible_cores = 0;
-
-		for (core_idx, para_id) in scheduler::Pallet::<T>::eligible_paras() {
-			total_eligible_cores += 1;
-			log::trace!(target: LOG_TARGET, "Found eligible para {:?} on core {:?}", para_id, core_idx);
-			eligible.entry(para_id).or_default().insert(core_idx);
-		}
-
-		let initial_candidate_count = backed_candidates.len();
-		let backed_candidates_with_core = sanitize_backed_candidates::<T>(
-			backed_candidates,
-			&allowed_relay_parents,
-			concluded_invalid_hashes,
-			eligible,
-			core_index_enabled,
-			allow_v2_receipts,
-		);
-		let count = count_backed_candidates(&backed_candidates_with_core);
-
-		ensure!(count <= total_eligible_cores, Error::<T>::UnscheduledCandidate);
-
-		METRICS.on_candidates_sanitized(count as u64);
-
-		// In `Enter` context (invoked during execution) no more candidates should be filtered,
-		// because they have already been filtered during `ProvideInherent` context. Abort in such
-		// cases.
-		if context == ProcessInherentDataContext::Enter {
-			ensure!(
-				initial_candidate_count == count,
-				Error::<T>::CandidatesFilteredDuringExecution
-			);
-		}
-
-		// Process backed candidates according to scheduled cores.
-		let inclusion::ProcessedCandidates::<<HeaderFor<T> as HeaderT>::Hash> {
-			core_indices: occupied,
-			candidate_receipt_with_backing_validator_indices,
-		} = inclusion::Pallet::<T>::process_candidates(
-			&allowed_relay_parents,
-			&backed_candidates_with_core,
-			scheduler::Pallet::<T>::group_validators,
-			core_index_enabled,
-		)?;
-		// Note which of the scheduled cores were actually occupied by a backed candidate.
-		scheduler::Pallet::<T>::occupied(occupied.into_iter().map(|e| (e.0, e.1)).collect());
+		// Back candidates.
+		let (candidate_receipt_with_backing_validator_indices, backed_candidates_with_core) =
+			Self::back_candidates(concluded_invalid_hashes, backed_candidates)?;
 
 		set_scrapable_on_chain_backings::<T>(
 			current_session,
@@ -672,6 +567,7 @@ impl<T: Config> Pallet<T> {
 
 		let bitfields = bitfields.into_iter().map(|v| v.into_unchecked()).collect();
 
+		let count = backed_candidates_with_core.len();
 		let processed = ParachainsInherentData {
 			bitfields,
 			backed_candidates: backed_candidates_with_core.into_iter().fold(
@@ -686,6 +582,104 @@ impl<T: Config> Pallet<T> {
 		};
 		Ok((processed, Some(all_weight_after).into()))
 	}
+
+	fn back_candidates(
+		concluded_invalid_hashes: BTreeSet<CandidateHash>,
+		backed_candidates: Vec<BackedCandidate<T::Hash>>,
+	) -> Result<
+		(
+			Vec<(CandidateReceipt<T::Hash>, Vec<(ValidatorIndex, ValidityAttestation)>)>,
+			BTreeMap<ParaId, Vec<(BackedCandidate<T::Hash>, CoreIndex)>>,
+		),
+		DispatchErrorWithPostInfo,
+	> {
+		let allowed_relay_parents = shared::AllowedRelayParents::<T>::get();
+		let upcoming_new_session = initializer::Pallet::<T>::upcoming_session_change();
+
+		METRICS.on_candidates_processed_total(backed_candidates.len() as u64);
+
+		if !upcoming_new_session {
+			let occupied_cores =
+				inclusion::Pallet::<T>::get_occupied_cores().map(|(core, _)| core).collect();
+
+			let mut eligible: BTreeMap<ParaId, BTreeSet<CoreIndex>> = BTreeMap::new();
+			let mut total_eligible_cores = 0;
+
+			for (core_idx, para_id) in Self::eligible_paras(&occupied_cores) {
+				total_eligible_cores += 1;
+				log::trace!(target: LOG_TARGET, "Found eligible para {:?} on core {:?}", para_id, core_idx);
+				eligible.entry(para_id).or_default().insert(core_idx);
+			}
+
+			let node_features = configuration::ActiveConfig::<T>::get().node_features;
+			let core_index_enabled = node_features
+				.get(FeatureIndex::ElasticScalingMVP as usize)
+				.map(|b| *b)
+				.unwrap_or(false);
+
+			let allow_v2_receipts = node_features
+				.get(FeatureIndex::CandidateReceiptV2 as usize)
+				.map(|b| *b)
+				.unwrap_or(false);
+
+			let backed_candidates_with_core = sanitize_backed_candidates::<T>(
+				backed_candidates,
+				&allowed_relay_parents,
+				concluded_invalid_hashes,
+				eligible,
+				core_index_enabled,
+				allow_v2_receipts,
+			);
+			let count = count_backed_candidates(&backed_candidates_with_core);
+
+			ensure!(count <= total_eligible_cores, Error::<T>::UnscheduledCandidate);
+
+			METRICS.on_candidates_sanitized(count as u64);
+
+			// Process backed candidates according to scheduled cores.
+			let candidate_receipt_with_backing_validator_indices =
+				inclusion::Pallet::<T>::process_candidates(
+					&allowed_relay_parents,
+					&backed_candidates_with_core,
+					scheduler::Pallet::<T>::group_validators,
+					core_index_enabled,
+				)?;
+
+			// We need to advance the claim queue on all cores, except for the ones that did not
+			// get freed in this block. The ones that did not get freed also cannot be newly
+			// occupied.
+			scheduler::Pallet::<T>::advance_claim_queue(&occupied_cores);
+
+			Ok((candidate_receipt_with_backing_validator_indices, backed_candidates_with_core))
+		} else {
+			log::debug!(
+				target: LOG_TARGET,
+				"Upcoming session change, not backing any new candidates."
+			);
+			// If we'll initialize a new session at the end of the block, we don't want to
+			// advance the claim queue.
+
+			Ok((vec![], BTreeMap::new()))
+		}
+	}
+
+	/// Paras that may get backed on cores.
+	///
+	/// 1. The para must be scheduled on a core.
+	/// 2. The core needs to be free, otherwise backing is not possible.
+	///
+	/// We get a set of the occupied cores as input.
+	pub(crate) fn eligible_paras<'a>(
+		occupied_cores: &'a BTreeSet<CoreIndex>,
+	) -> impl Iterator<Item = (CoreIndex, ParaId)> + 'a {
+		scheduler::ClaimQueue::<T>::get().into_iter().filter_map(|(core_idx, queue)| {
+			if occupied_cores.contains(&core_idx) {
+				return None
+			}
+			let next_scheduled = queue.front()?;
+			Some((core_idx, next_scheduled.para_id()))
+		})
+	}
 }
 
 /// Derive a bitfield from dispute
@@ -1144,10 +1138,7 @@ fn sanitize_backed_candidates<T: crate::inclusion::Config>(
 }
 
 fn count_backed_candidates<B>(backed_candidates: &BTreeMap<ParaId, Vec<B>>) -> usize {
-	backed_candidates.iter().fold(0, |mut count, (_id, candidates)| {
-		count += candidates.len();
-		count
-	})
+	backed_candidates.values().map(|c| c.len()).sum()
 }
 
 /// Derive entropy from babe provided per block randomness.
diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs
index f5c3d507776439d0b69e2b694c8eaad9b6d85933..2c65298baf01b98569f38d7302d3658eb19cd0d6 100644
--- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs
+++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs
@@ -49,11 +49,10 @@ mod enter {
 
 	use crate::{
 		builder::{junk_collator, junk_collator_signature, Bench, BenchBuilder, CandidateModifier},
+		disputes::clear_dispute_storage,
+		initializer::BufferedSessionChange,
 		mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, RuntimeOrigin, Test},
-		scheduler::{
-			common::{Assignment, AssignmentProvider},
-			ParasEntry,
-		},
+		scheduler::common::{Assignment, AssignmentProvider},
 		session_info,
 	};
 	use alloc::collections::btree_map::BTreeMap;
@@ -73,7 +72,6 @@ mod enter {
 		backed_and_concluding: BTreeMap<u32, u32>,
 		num_validators_per_core: u32,
 		code_upgrade: Option<u32>,
-		fill_claimqueue: bool,
 		elastic_paras: BTreeMap<u32, u8>,
 		unavailable_cores: Vec<u32>,
 		v2_descriptor: bool,
@@ -87,7 +85,6 @@ mod enter {
 			backed_and_concluding,
 			num_validators_per_core,
 			code_upgrade,
-			fill_claimqueue,
 			elastic_paras,
 			unavailable_cores,
 			v2_descriptor,
@@ -108,14 +105,11 @@ mod enter {
 			.set_dispute_statements(dispute_statements)
 			.set_backed_and_concluding_paras(backed_and_concluding.clone())
 			.set_dispute_sessions(&dispute_sessions[..])
-			.set_fill_claimqueue(fill_claimqueue)
 			.set_unavailable_cores(unavailable_cores)
 			.set_candidate_descriptor_v2(v2_descriptor)
 			.set_candidate_modifier(candidate_modifier);
 
 		// Setup some assignments as needed:
-		mock_assigner::Pallet::<Test>::set_core_count(builder.max_cores());
-
 		(0..(builder.max_cores() as usize - extra_cores)).for_each(|para_id| {
 			(0..elastic_paras.get(&(para_id as u32)).cloned().unwrap_or(1)).for_each(
 				|_para_local_core_idx| {
@@ -164,7 +158,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 1,
 				code_upgrade: None,
-				fill_claimqueue: true,
 				elastic_paras: BTreeMap::new(),
 				unavailable_cores: vec![],
 				v2_descriptor,
@@ -188,6 +181,7 @@ mod enter {
 			inherent_data
 				.put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data)
 				.unwrap();
+			assert!(!scheduler::Pallet::<Test>::claim_queue_is_empty());
 
 			// Nothing is filtered out (including the backed candidates.)
 			assert_eq!(
@@ -272,7 +266,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 1,
 				code_upgrade: None,
-				fill_claimqueue: true,
 				elastic_paras: [(2, 3)].into_iter().collect(),
 				unavailable_cores: vec![],
 				v2_descriptor,
@@ -293,6 +286,7 @@ mod enter {
 				.put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data)
 				.unwrap();
 
+			assert!(!scheduler::Pallet::<Test>::claim_queue_is_empty());
 			assert!(pallet::OnChainVotes::<Test>::get().is_none());
 
 			// Nothing is filtered out (including the backed candidates.)
@@ -375,7 +369,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 1,
 				code_upgrade: None,
-				fill_claimqueue: true,
 				elastic_paras: [(2, 4)].into_iter().collect(),
 				unavailable_cores: unavailable_cores.clone(),
 				v2_descriptor: false,
@@ -527,6 +520,101 @@ mod enter {
 		});
 	}
 
+	#[test]
+	// Test that no new candidates are backed if there's an upcoming session change scheduled at the
+	// end of the block. Claim queue will also not be advanced.
+	fn session_change() {
+		let config = MockGenesisConfig::default();
+		assert!(config.configuration.config.scheduler_params.lookahead > 0);
+
+		new_test_ext(config).execute_with(|| {
+			let dispute_statements = BTreeMap::new();
+
+			let mut backed_and_concluding = BTreeMap::new();
+			backed_and_concluding.insert(0, 1);
+			backed_and_concluding.insert(1, 1);
+
+			let scenario = make_inherent_data(TestConfig {
+				dispute_statements,
+				dispute_sessions: vec![], // No disputes
+				backed_and_concluding,
+				num_validators_per_core: 1,
+				code_upgrade: None,
+				elastic_paras: BTreeMap::new(),
+				unavailable_cores: vec![],
+				v2_descriptor: false,
+				candidate_modifier: None,
+			});
+
+			let prev_claim_queue = scheduler::ClaimQueue::<Test>::get();
+
+			assert_eq!(inclusion::PendingAvailability::<Test>::iter().count(), 2);
+			assert_eq!(
+				inclusion::PendingAvailability::<Test>::get(ParaId::from(0)).unwrap().len(),
+				1
+			);
+			assert_eq!(
+				inclusion::PendingAvailability::<Test>::get(ParaId::from(1)).unwrap().len(),
+				1
+			);
+
+			// We expect the scenario to have cores 0 & 1 with pending availability. The backed
+			// candidates are also created for cores 0 & 1. The pending available candidates will
+			// become available but the new candidates will not be backed since there is an upcoming
+			// session change.
+			let mut expected_para_inherent_data = scenario.data.clone();
+			expected_para_inherent_data.backed_candidates.clear();
+
+			// Check the para inherent data is as expected:
+			// * 1 bitfield per validator (2 validators)
+			assert_eq!(expected_para_inherent_data.bitfields.len(), 2);
+			// * 0 disputes.
+			assert_eq!(expected_para_inherent_data.disputes.len(), 0);
+			let mut inherent_data = InherentData::new();
+			inherent_data
+				.put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data)
+				.unwrap();
+			assert!(!scheduler::Pallet::<Test>::claim_queue_is_empty());
+
+			// Simulate a session change scheduled to happen at the end of the block.
+			initializer::BufferedSessionChanges::<Test>::put(vec![BufferedSessionChange {
+				validators: vec![],
+				queued: vec![],
+				session_index: 3,
+			}]);
+
+			// Only backed candidates are filtered out.
+			assert_eq!(
+				Pallet::<Test>::create_inherent_inner(&inherent_data.clone()).unwrap(),
+				expected_para_inherent_data
+			);
+
+			assert_eq!(
+				// No candidates backed.
+				OnChainVotes::<Test>::get().unwrap().backing_validators_per_candidate.len(),
+				0
+			);
+
+			assert_eq!(
+				// The session of the on chain votes should equal the current session, which is 2
+				OnChainVotes::<Test>::get().unwrap().session,
+				2
+			);
+
+			// The pending availability maps still have entries for both paras, but they are
+			// now empty (no candidates pending availability).
+			assert_eq!(inclusion::PendingAvailability::<Test>::iter().count(), 2);
+			assert!(inclusion::PendingAvailability::<Test>::get(ParaId::from(0))
+				.unwrap()
+				.is_empty());
+			assert!(inclusion::PendingAvailability::<Test>::get(ParaId::from(1))
+				.unwrap()
+				.is_empty());
+
+			// The claim queue should not have been advanced.
+			assert_eq!(prev_claim_queue, scheduler::ClaimQueue::<Test>::get());
+		});
+	}
+
 	#[test]
 	fn test_session_is_tracked_in_on_chain_scraping() {
 		use crate::disputes::run_to_block;
@@ -633,7 +721,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 5,
 				code_upgrade: None,
-				fill_claimqueue: false,
 				elastic_paras: BTreeMap::new(),
 				unavailable_cores: vec![],
 				v2_descriptor: false,
@@ -655,8 +742,7 @@ mod enter {
 				.put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data)
 				.unwrap();
 
-			// The current schedule is empty prior to calling `create_inherent_enter`.
-			assert!(scheduler::Pallet::<Test>::claim_queue_is_empty());
+			assert!(!scheduler::Pallet::<Test>::claim_queue_is_empty());
 
 			let multi_dispute_inherent_data =
 				Pallet::<Test>::create_inherent_inner(&inherent_data.clone()).unwrap();
@@ -671,6 +757,8 @@ mod enter {
 				&expected_para_inherent_data.disputes[..2],
 			);
 
+			clear_dispute_storage::<Test>();
+
 			assert_ok!(Pallet::<Test>::enter(
 				frame_system::RawOrigin::None.into(),
 				multi_dispute_inherent_data,
@@ -708,7 +796,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 6,
 				code_upgrade: None,
-				fill_claimqueue: false,
 				elastic_paras: BTreeMap::new(),
 				unavailable_cores: vec![],
 				v2_descriptor: false,
@@ -729,8 +816,7 @@ mod enter {
 				.put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data)
 				.unwrap();
 
-			// The current schedule is empty prior to calling `create_inherent_enter`.
-			assert!(scheduler::Pallet::<Test>::claim_queue_is_empty());
+			assert!(!scheduler::Pallet::<Test>::claim_queue_is_empty());
 
 			let limit_inherent_data =
 				Pallet::<Test>::create_inherent_inner(&inherent_data.clone()).unwrap();
@@ -742,6 +828,8 @@ mod enter {
 			assert_eq!(limit_inherent_data.disputes[0].session, 1);
 			assert_eq!(limit_inherent_data.disputes[1].session, 2);
 
+			clear_dispute_storage::<Test>();
+
 			assert_ok!(Pallet::<Test>::enter(
 				frame_system::RawOrigin::None.into(),
 				limit_inherent_data,
@@ -781,7 +869,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 4,
 				code_upgrade: None,
-				fill_claimqueue: false,
 				elastic_paras: BTreeMap::new(),
 				unavailable_cores: vec![],
 				v2_descriptor: false,
@@ -803,8 +890,7 @@ mod enter {
 				.put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data)
 				.unwrap();
 
-			// The current schedule is empty prior to calling `create_inherent_enter`.
-			assert!(scheduler::Pallet::<Test>::claim_queue_is_empty());
+			assert!(!scheduler::Pallet::<Test>::claim_queue_is_empty());
 
 			// Nothing is filtered out (including the backed candidates.)
 			let limit_inherent_data =
@@ -826,6 +912,8 @@ mod enter {
 			// over weight
 			assert_eq!(limit_inherent_data.backed_candidates.len(), 0);
 
+			clear_dispute_storage::<Test>();
+
 			assert_ok!(Pallet::<Test>::enter(
 				frame_system::RawOrigin::None.into(),
 				limit_inherent_data,
@@ -870,7 +958,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 5,
 				code_upgrade: None,
-				fill_claimqueue: false,
 				elastic_paras: BTreeMap::new(),
 				unavailable_cores: vec![],
 				v2_descriptor: false,
@@ -892,10 +979,8 @@ mod enter {
 				.put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data)
 				.unwrap();
 
-			// The current schedule is empty prior to calling `create_inherent_enter`.
-			assert!(scheduler::Pallet::<Test>::claim_queue_is_empty());
+			assert!(!scheduler::Pallet::<Test>::claim_queue_is_empty());
 
-			// Nothing is filtered out (including the backed candidates.)
 			let limit_inherent_data =
 				Pallet::<Test>::create_inherent_inner(&inherent_data.clone()).unwrap();
 			assert_ne!(limit_inherent_data, expected_para_inherent_data);
@@ -916,9 +1001,11 @@ mod enter {
 			// over weight
 			assert_eq!(limit_inherent_data.backed_candidates.len(), 0);
 
+			clear_dispute_storage::<Test>();
+
 			assert_ok!(Pallet::<Test>::enter(
 				frame_system::RawOrigin::None.into(),
-				limit_inherent_data,
+				limit_inherent_data
 			));
 
 			assert_eq!(
@@ -959,7 +1046,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 5,
 				code_upgrade: None,
-				fill_claimqueue: false,
 				elastic_paras: BTreeMap::new(),
 				unavailable_cores: vec![],
 				v2_descriptor: false,
@@ -1020,7 +1106,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core,
 				code_upgrade: None,
-				fill_claimqueue: true,
 				elastic_paras: BTreeMap::new(),
 				unavailable_cores: vec![],
 				v2_descriptor: false,
@@ -1043,6 +1128,21 @@ mod enter {
 				Pallet::<Test>::create_inherent_inner(&inherent_data.clone()).unwrap();
 			assert!(limit_inherent_data == expected_para_inherent_data);
 
+			// Cores were scheduled. We should put the assignments back, before calling enter().
+			let cores = (0..num_candidates)
+				.into_iter()
+				.map(|i| {
+					// Load an assignment into provider so that one is present to pop
+					let assignment =
+						<Test as scheduler::Config>::AssignmentProvider::get_mock_assignment(
+							CoreIndex(i),
+							ParaId::from(i),
+						);
+					(CoreIndex(i), [assignment].into())
+				})
+				.collect();
+			scheduler::ClaimQueue::<Test>::set(cores);
+
 			assert_ok!(Pallet::<Test>::enter(
 				frame_system::RawOrigin::None.into(),
 				limit_inherent_data,
@@ -1108,7 +1208,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 5,
 				code_upgrade: None,
-				fill_claimqueue: false,
 				elastic_paras: BTreeMap::new(),
 				unavailable_cores: vec![],
 				v2_descriptor: false,
@@ -1166,24 +1265,23 @@ mod enter {
 			);
 
 			// One core was scheduled. We should put the assignment back, before calling enter().
-			let now = frame_system::Pallet::<Test>::block_number() + 1;
 			let used_cores = 5;
 			let cores = (0..used_cores)
 				.into_iter()
 				.map(|i| {
-					let SchedulerParams { ttl, .. } =
-						configuration::ActiveConfig::<Test>::get().scheduler_params;
 					// Load an assignment into provider so that one is present to pop
 					let assignment =
 						<Test as scheduler::Config>::AssignmentProvider::get_mock_assignment(
 							CoreIndex(i),
 							ParaId::from(i),
 						);
-					(CoreIndex(i), [ParasEntry::new(assignment, now + ttl)].into())
+					(CoreIndex(i), [assignment].into())
 				})
 				.collect();
 			scheduler::ClaimQueue::<Test>::set(cores);
 
+			clear_dispute_storage::<Test>();
+
 			assert_ok!(Pallet::<Test>::enter(
 				frame_system::RawOrigin::None.into(),
 				limit_inherent_data,
@@ -1217,7 +1315,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 5,
 				code_upgrade: None,
-				fill_claimqueue: false,
 				elastic_paras: BTreeMap::new(),
 				unavailable_cores: vec![],
 				v2_descriptor: false,
@@ -1287,7 +1384,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 5,
 				code_upgrade: None,
-				fill_claimqueue: false,
 				elastic_paras: BTreeMap::new(),
 				unavailable_cores: vec![],
 				v2_descriptor: false,
@@ -1355,7 +1451,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 5,
 				code_upgrade: None,
-				fill_claimqueue: false,
 				elastic_paras: BTreeMap::new(),
 				unavailable_cores: vec![],
 				v2_descriptor: false,
@@ -1446,7 +1541,6 @@ mod enter {
 	}
 
 	// Ensure that overweight parachain inherents are always rejected by the runtime.
-	// Runtime should panic and return `InherentOverweight` error.
 	#[rstest]
 	#[case(true)]
 	#[case(false)]
@@ -1479,7 +1573,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 5,
 				code_upgrade: None,
-				fill_claimqueue: false,
 				elastic_paras: BTreeMap::new(),
 				unavailable_cores: vec![],
 				v2_descriptor,
@@ -1548,7 +1641,6 @@ mod enter {
 	}
 
 	// Ensure that overweight parachain inherents are always rejected by the runtime.
-	// Runtime should panic and return `InherentOverweight` error.
 	#[test]
 	fn inherent_create_weight_invariant() {
 		new_test_ext(MockGenesisConfig::default()).execute_with(|| {
@@ -1570,7 +1662,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 5,
 				code_upgrade: None,
-				fill_claimqueue: false,
 				elastic_paras: BTreeMap::new(),
 				unavailable_cores: vec![],
 				v2_descriptor: false,
@@ -1600,7 +1691,7 @@ mod enter {
 			.unwrap_err()
 			.error;
 
-			assert_eq!(dispatch_error, Error::<Test>::InherentOverweight.into());
+			assert_eq!(dispatch_error, Error::<Test>::InherentDataFilteredDuringExecution.into());
 		});
 	}
 
@@ -1630,7 +1721,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 5,
 				code_upgrade: None,
-				fill_claimqueue: true,
 				elastic_paras: [(2, 8)].into_iter().collect(),
 				unavailable_cores: unavailable_cores.clone(),
 				v2_descriptor: true,
@@ -1670,7 +1760,7 @@ mod enter {
 
 			// We expect `enter` to fail because the inherent data contains backed candidates with
 			// v2 descriptors.
-			assert_eq!(dispatch_error, Error::<Test>::CandidatesFilteredDuringExecution.into());
+			assert_eq!(dispatch_error, Error::<Test>::InherentDataFilteredDuringExecution.into());
 		});
 	}
 
@@ -1698,9 +1788,8 @@ mod enter {
 				dispute_statements: BTreeMap::new(),
 				dispute_sessions: vec![], // No disputes
 				backed_and_concluding,
-				num_validators_per_core: 5,
+				num_validators_per_core: 1,
 				code_upgrade: None,
-				fill_claimqueue: true,
 				elastic_paras: [(2, 8)].into_iter().collect(),
 				unavailable_cores: unavailable_cores.clone(),
 				v2_descriptor: true,
@@ -1719,8 +1808,8 @@ mod enter {
 			let unfiltered_para_inherent_data = scenario.data.clone();
 
 			// Check the para inherent data is as expected:
-			// * 1 bitfield per validator (5 validators per core, 10 backed candidates)
-			assert_eq!(unfiltered_para_inherent_data.bitfields.len(), 50);
+			// * 1 bitfield per validator (1 validator per core, 10 backed candidates)
+			assert_eq!(unfiltered_para_inherent_data.bitfields.len(), 10);
 			// * 10 v2 candidate descriptors.
 			assert_eq!(unfiltered_para_inherent_data.backed_candidates.len(), 10);
 
@@ -1738,7 +1827,7 @@ mod enter {
 
 			// We expect `enter` to fail because the inherent data contains backed candidates with
 			// v2 descriptors.
-			assert_eq!(dispatch_error, Error::<Test>::CandidatesFilteredDuringExecution.into());
+			assert_eq!(dispatch_error, Error::<Test>::InherentDataFilteredDuringExecution.into());
 		});
 	}
 
@@ -1766,9 +1855,8 @@ mod enter {
 				dispute_statements: BTreeMap::new(),
 				dispute_sessions: vec![], // No disputes
 				backed_and_concluding,
-				num_validators_per_core: 5,
+				num_validators_per_core: 1,
 				code_upgrade: None,
-				fill_claimqueue: true,
 				elastic_paras: [(2, 8)].into_iter().collect(),
 				unavailable_cores: unavailable_cores.clone(),
 				v2_descriptor: true,
@@ -1787,8 +1875,8 @@ mod enter {
 			let unfiltered_para_inherent_data = scenario.data.clone();
 
 			// Check the para inherent data is as expected:
-			// * 1 bitfield per validator (5 validators per core, 10 backed candidates)
-			assert_eq!(unfiltered_para_inherent_data.bitfields.len(), 50);
+			// * 1 bitfield per validator (1 validator per core, 10 backed candidates)
+			assert_eq!(unfiltered_para_inherent_data.bitfields.len(), 10);
 			// * 10 v2 candidate descriptors.
 			assert_eq!(unfiltered_para_inherent_data.backed_candidates.len(), 10);
 
@@ -1806,7 +1894,7 @@ mod enter {
 
 			// We expect `enter` to fail because the inherent data contains backed candidates with
 			// v2 descriptors.
-			assert_eq!(dispatch_error, Error::<Test>::CandidatesFilteredDuringExecution.into());
+			assert_eq!(dispatch_error, Error::<Test>::InherentDataFilteredDuringExecution.into());
 		});
 	}
 	#[test]
@@ -1843,7 +1931,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 1,
 				code_upgrade: None,
-				fill_claimqueue: true,
 				elastic_paras: [(2, 3)].into_iter().collect(),
 				unavailable_cores: unavailable_cores.clone(),
 				v2_descriptor: true,
@@ -1898,7 +1985,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 1,
 				code_upgrade: None,
-				fill_claimqueue: true,
 				elastic_paras: [(2, 3)].into_iter().collect(),
 				unavailable_cores: unavailable_cores.clone(),
 				v2_descriptor: true,
@@ -1985,7 +2071,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 1,
 				code_upgrade: None,
-				fill_claimqueue: true,
 				elastic_paras: Default::default(),
 				unavailable_cores: unavailable_cores.clone(),
 				v2_descriptor: true,
@@ -2040,7 +2125,6 @@ mod enter {
 				backed_and_concluding,
 				num_validators_per_core: 1,
 				code_upgrade: None,
-				fill_claimqueue: true,
 				elastic_paras: [(2, 3)].into_iter().collect(),
 				unavailable_cores,
 				v2_descriptor: true,
@@ -2372,7 +2456,7 @@ mod sanitizers {
 	mod candidates {
 		use crate::{
 			mock::{set_disabled_validators, RuntimeOrigin},
-			scheduler::{common::Assignment, ParasEntry},
+			scheduler::common::Assignment,
 			util::{make_persisted_validation_data, make_persisted_validation_data_with_parent},
 		};
 		use alloc::collections::vec_deque::VecDeque;
@@ -2453,17 +2537,17 @@ mod sanitizers {
 			scheduler::Pallet::<Test>::set_claim_queue(BTreeMap::from([
 				(
 					CoreIndex::from(0),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 1.into(),
+						core_index: CoreIndex(0),
+					}]),
 				),
 				(
 					CoreIndex::from(1),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(1) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 2.into(),
+						core_index: CoreIndex(1),
+					}]),
 				),
 			]));
 
@@ -2545,7 +2629,7 @@ mod sanitizers {
 
 			// State sanity checks
 			assert_eq!(
-				scheduler::Pallet::<Test>::scheduled_paras().collect::<Vec<_>>(),
+				Pallet::<Test>::eligible_paras(&Default::default()).collect::<Vec<_>>(),
 				vec![(CoreIndex(0), ParaId::from(1)), (CoreIndex(1), ParaId::from(2))]
 			);
 			assert_eq!(
@@ -2641,73 +2725,73 @@ mod sanitizers {
 			scheduler::Pallet::<Test>::set_claim_queue(BTreeMap::from([
 				(
 					CoreIndex::from(0),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 1.into(),
+						core_index: CoreIndex(0),
+					}]),
 				),
 				(
 					CoreIndex::from(1),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 1.into(),
+						core_index: CoreIndex(1),
+					}]),
 				),
 				(
 					CoreIndex::from(2),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(2) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 2.into(),
+						core_index: CoreIndex(2),
+					}]),
 				),
 				(
 					CoreIndex::from(3),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(3) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 2.into(),
+						core_index: CoreIndex(3),
+					}]),
 				),
 				(
 					CoreIndex::from(4),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(4) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 3.into(),
+						core_index: CoreIndex(4),
+					}]),
 				),
 				(
 					CoreIndex::from(5),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(5) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 4.into(),
+						core_index: CoreIndex(5),
+					}]),
 				),
 				(
 					CoreIndex::from(6),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 5.into(), core_index: CoreIndex(6) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 5.into(),
+						core_index: CoreIndex(6),
+					}]),
 				),
 				(
 					CoreIndex::from(7),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 7.into(), core_index: CoreIndex(7) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 7.into(),
+						core_index: CoreIndex(7),
+					}]),
 				),
 				(
 					CoreIndex::from(8),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 7.into(), core_index: CoreIndex(8) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 7.into(),
+						core_index: CoreIndex(8),
+					}]),
 				),
 				(
 					CoreIndex::from(9),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 8.into(), core_index: CoreIndex(9) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 8.into(),
+						core_index: CoreIndex(9),
+					}]),
 				),
 			]));
 
@@ -3087,7 +3171,7 @@ mod sanitizers {
 
 			// State sanity checks
 			assert_eq!(
-				scheduler::Pallet::<Test>::scheduled_paras().collect::<Vec<_>>(),
+				Pallet::<Test>::eligible_paras(&Default::default()).collect::<Vec<_>>(),
 				vec![
 					(CoreIndex(0), ParaId::from(1)),
 					(CoreIndex(1), ParaId::from(1)),
@@ -3102,7 +3186,7 @@ mod sanitizers {
 				]
 			);
 			let mut scheduled: BTreeMap<ParaId, BTreeSet<CoreIndex>> = BTreeMap::new();
-			for (core_idx, para_id) in scheduler::Pallet::<Test>::scheduled_paras() {
+			for (core_idx, para_id) in Pallet::<Test>::eligible_paras(&Default::default()) {
 				scheduled.entry(para_id).or_default().insert(core_idx);
 			}
 
@@ -3186,66 +3270,66 @@ mod sanitizers {
 			scheduler::Pallet::<Test>::set_claim_queue(BTreeMap::from([
 				(
 					CoreIndex::from(0),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 1.into(),
+						core_index: CoreIndex(0),
+					}]),
 				),
 				(
 					CoreIndex::from(1),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 1.into(),
+						core_index: CoreIndex(1),
+					}]),
 				),
 				(
 					CoreIndex::from(2),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(2) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 2.into(),
+						core_index: CoreIndex(2),
+					}]),
 				),
 				(
 					CoreIndex::from(3),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(3) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 2.into(),
+						core_index: CoreIndex(3),
+					}]),
 				),
 				(
 					CoreIndex::from(4),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(4) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 2.into(),
+						core_index: CoreIndex(4),
+					}]),
 				),
 				(
 					CoreIndex::from(5),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(5) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 3.into(),
+						core_index: CoreIndex(5),
+					}]),
 				),
 				(
 					CoreIndex::from(6),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(6) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 3.into(),
+						core_index: CoreIndex(6),
+					}]),
 				),
 				(
 					CoreIndex::from(7),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(7) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 4.into(),
+						core_index: CoreIndex(7),
+					}]),
 				),
 				(
 					CoreIndex::from(8),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(8) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 4.into(),
+						core_index: CoreIndex(8),
+					}]),
 				),
 			]));
 
@@ -3575,7 +3659,7 @@ mod sanitizers {
 
 			// State sanity checks
 			assert_eq!(
-				scheduler::Pallet::<Test>::scheduled_paras().collect::<Vec<_>>(),
+				Pallet::<Test>::eligible_paras(&Default::default()).collect::<Vec<_>>(),
 				vec![
 					(CoreIndex(0), ParaId::from(1)),
 					(CoreIndex(1), ParaId::from(1)),
@@ -3589,7 +3673,7 @@ mod sanitizers {
 				]
 			);
 			let mut scheduled: BTreeMap<ParaId, BTreeSet<CoreIndex>> = BTreeMap::new();
-			for (core_idx, para_id) in scheduler::Pallet::<Test>::scheduled_paras() {
+			for (core_idx, para_id) in Pallet::<Test>::eligible_paras(&Default::default()) {
 				scheduled.entry(para_id).or_default().insert(core_idx);
 			}
 
@@ -3710,45 +3794,45 @@ mod sanitizers {
 			scheduler::Pallet::<Test>::set_claim_queue(BTreeMap::from([
 				(
 					CoreIndex::from(0),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 1.into(),
+						core_index: CoreIndex(0),
+					}]),
 				),
 				(
 					CoreIndex::from(1),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 1.into(),
+						core_index: CoreIndex(1),
+					}]),
 				),
 				(
 					CoreIndex::from(2),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(2) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 1.into(),
+						core_index: CoreIndex(2),
+					}]),
 				),
 				(
 					CoreIndex::from(3),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(3) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 2.into(),
+						core_index: CoreIndex(3),
+					}]),
 				),
 				(
 					CoreIndex::from(4),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(4) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 2.into(),
+						core_index: CoreIndex(4),
+					}]),
 				),
 				(
 					CoreIndex::from(5),
-					VecDeque::from([ParasEntry::new(
-						Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(5) },
-						RELAY_PARENT_NUM,
-					)]),
+					VecDeque::from([Assignment::Pool {
+						para_id: 2.into(),
+						core_index: CoreIndex(5),
+					}]),
 				),
 			]));
 
@@ -3996,7 +4080,7 @@ mod sanitizers {
 
 			// State sanity checks
 			assert_eq!(
-				scheduler::Pallet::<Test>::scheduled_paras().collect::<Vec<_>>(),
+				Pallet::<Test>::eligible_paras(&Default::default()).collect::<Vec<_>>(),
 				vec![
 					(CoreIndex(0), ParaId::from(1)),
 					(CoreIndex(1), ParaId::from(1)),
@@ -4007,7 +4091,7 @@ mod sanitizers {
 				]
 			);
 			let mut scheduled: BTreeMap<ParaId, BTreeSet<CoreIndex>> = BTreeMap::new();
-			for (core_idx, para_id) in scheduler::Pallet::<Test>::scheduled_paras() {
+			for (core_idx, para_id) in Pallet::<Test>::eligible_paras(&Default::default()) {
 				scheduled.entry(para_id).or_default().insert(core_idx);
 			}
 
diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v11.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v11.rs
index a0996d5df0eec2eade802bf0980e1b4c49c3ce17..e9327bc7641a3602eaaf7096cbd16d2d6e24b780 100644
--- a/polkadot/runtime/parachains/src/runtime_api_impl/v11.rs
+++ b/polkadot/runtime/parachains/src/runtime_api_impl/v11.rs
@@ -18,8 +18,7 @@
 //! functions.
 
 use crate::{
-	configuration, disputes, dmp, hrmp, inclusion, initializer, paras, paras_inherent,
-	scheduler::{self, CoreOccupied},
+	configuration, disputes, dmp, hrmp, inclusion, initializer, paras, paras_inherent, scheduler,
 	session_info, shared,
 };
 use alloc::{
@@ -67,15 +66,6 @@ pub fn validator_groups<T: initializer::Config>(
 
 /// Implementation for the `availability_cores` function of the runtime API.
 pub fn availability_cores<T: initializer::Config>() -> Vec<CoreState<T::Hash, BlockNumberFor<T>>> {
-	let cores = scheduler::AvailabilityCores::<T>::get();
-	let now = frame_system::Pallet::<T>::block_number() + One::one();
-
-	// This explicit update is only strictly required for session boundaries:
-	//
-	// At the end of a session we clear the claim queues: Without this update call, nothing would be
-	// scheduled to the client.
-	scheduler::Pallet::<T>::free_cores_and_fill_claim_queue(Vec::new(), now);
-
 	let time_out_for = scheduler::Pallet::<T>::availability_timeout_predicate();
 
 	let group_responsible_for =
@@ -95,76 +85,42 @@ pub fn availability_cores<T: initializer::Config>() -> Vec<CoreState<T::Hash, Bl
 			},
 		};
 
-	let scheduled: BTreeMap<_, _> = scheduler::Pallet::<T>::scheduled_paras().collect();
-
-	cores
-		.into_iter()
-		.enumerate()
-		.map(|(i, core)| match core {
-			CoreOccupied::Paras(entry) => {
-				// Due to https://github.com/paritytech/polkadot-sdk/issues/64, using the new storage types would cause
-				// this runtime API to panic. We explicitly handle the storage for version 0 to
-				// prevent that. When removing the inclusion v0 -> v1 migration, this bit of code
-				// can also be removed.
-				let pending_availability = if inclusion::Pallet::<T>::on_chain_storage_version() ==
-					StorageVersion::new(0)
-				{
-					inclusion::migration::v0::PendingAvailability::<T>::get(entry.para_id())
-						.expect("Occupied core always has pending availability; qed")
-				} else {
-					let candidate = inclusion::Pallet::<T>::pending_availability_with_core(
-						entry.para_id(),
-						CoreIndex(i as u32),
-					)
-					.expect("Occupied core always has pending availability; qed");
-
-					// Translate to the old candidate format, as we don't need the commitments now.
-					inclusion::migration::v0::CandidatePendingAvailability {
-						core: candidate.core_occupied(),
-						hash: candidate.candidate_hash(),
-						descriptor: candidate.candidate_descriptor().clone(),
-						availability_votes: candidate.availability_votes().clone(),
-						backers: candidate.backers().clone(),
-						relay_parent_number: candidate.relay_parent_number(),
-						backed_in_number: candidate.backed_in_number(),
-						backing_group: candidate.backing_group(),
-					}
-				};
-
-				let backed_in_number = pending_availability.backed_in_number;
+	let claim_queue = scheduler::Pallet::<T>::get_claim_queue();
+	let occupied_cores: BTreeMap<CoreIndex, inclusion::CandidatePendingAvailability<_, _>> =
+		inclusion::Pallet::<T>::get_occupied_cores().collect();
+	let n_cores = scheduler::Pallet::<T>::num_availability_cores();
 
+	(0..n_cores)
+		.map(|core_idx| {
+			let core_idx = CoreIndex(core_idx as u32);
+			if let Some(pending_availability) = occupied_cores.get(&core_idx) {
 				// Use the same block number for determining the responsible group as what the
 				// backing subsystem would use when it calls validator_groups api.
 				let backing_group_allocation_time =
-					pending_availability.relay_parent_number + One::one();
+					pending_availability.relay_parent_number() + One::one();
 				CoreState::Occupied(OccupiedCore {
-					next_up_on_available: scheduler::Pallet::<T>::next_up_on_available(CoreIndex(
-						i as u32,
-					)),
-					occupied_since: backed_in_number,
-					time_out_at: time_out_for(backed_in_number).live_until,
-					next_up_on_time_out: scheduler::Pallet::<T>::next_up_on_time_out(CoreIndex(
-						i as u32,
-					)),
-					availability: pending_availability.availability_votes.clone(),
+					next_up_on_available: scheduler::Pallet::<T>::next_up_on_available(core_idx),
+					occupied_since: pending_availability.backed_in_number(),
+					time_out_at: time_out_for(pending_availability.backed_in_number()).live_until,
+					next_up_on_time_out: scheduler::Pallet::<T>::next_up_on_available(core_idx),
+					availability: pending_availability.availability_votes().clone(),
 					group_responsible: group_responsible_for(
 						backing_group_allocation_time,
-						pending_availability.core,
+						pending_availability.core_occupied(),
 					),
-					candidate_hash: pending_availability.hash,
-					candidate_descriptor: pending_availability.descriptor,
+					candidate_hash: pending_availability.candidate_hash(),
+					candidate_descriptor: pending_availability.candidate_descriptor().clone(),
 				})
-			},
-			CoreOccupied::Free => {
-				if let Some(para_id) = scheduled.get(&CoreIndex(i as _)).cloned() {
+			} else {
+				if let Some(assignment) = claim_queue.get(&core_idx).and_then(|q| q.front()) {
 					CoreState::Scheduled(polkadot_primitives::ScheduledCore {
-						para_id,
+						para_id: assignment.para_id(),
 						collator: None,
 					})
 				} else {
 					CoreState::Free
 				}
-			},
+			}
 		})
 		.collect()
 }
@@ -195,13 +151,12 @@ where
 			build()
 		},
 		OccupiedCoreAssumption::TimedOut => build(),
-		OccupiedCoreAssumption::Free => {
-			if <inclusion::Pallet<Config>>::pending_availability(para_id).is_some() {
+		OccupiedCoreAssumption::Free =>
+			if !<inclusion::Pallet<Config>>::candidates_pending_availability(para_id).is_empty() {
 				None
 			} else {
 				build()
-			}
-		},
+			},
 	}
 }
 
@@ -240,10 +195,12 @@ pub fn assumed_validation_data<T: initializer::Config>(
 	let persisted_validation_data = make_validation_data().or_else(|| {
 		// Try again with force enacting the pending candidates. This check only makes sense if
 		// there are any pending candidates.
-		inclusion::Pallet::<T>::pending_availability(para_id).and_then(|_| {
-			inclusion::Pallet::<T>::force_enact(para_id);
-			make_validation_data()
-		})
+		(!inclusion::Pallet::<T>::candidates_pending_availability(para_id).is_empty())
+			.then_some(())
+			.and_then(|_| {
+				inclusion::Pallet::<T>::force_enact(para_id);
+				make_validation_data()
+			})
 	});
 	// If we were successful, also query current validation code hash.
 	persisted_validation_data.zip(paras::CurrentCodeHash::<T>::get(&para_id))
@@ -319,7 +276,7 @@ pub fn validation_code<T: initializer::Config>(
 pub fn candidate_pending_availability<T: initializer::Config>(
 	para_id: ParaId,
 ) -> Option<CommittedCandidateReceipt<T::Hash>> {
-	inclusion::Pallet::<T>::candidate_pending_availability(para_id)
+	inclusion::Pallet::<T>::first_candidate_pending_availability(para_id)
 }
 
 /// Implementation for the `candidate_events` function of the runtime API.
@@ -568,23 +525,12 @@ pub fn approval_voting_params<T: initializer::Config>() -> ApprovalVotingParams
 
 /// Returns the claimqueue from the scheduler
 pub fn claim_queue<T: scheduler::Config>() -> BTreeMap<CoreIndex, VecDeque<ParaId>> {
-	let now = <frame_system::Pallet<T>>::block_number() + One::one();
-
-	// This is needed so that the claim queue always has the right size (equal to
-	// scheduling_lookahead). Otherwise, if a candidate is backed in the same block where the
-	// previous candidate is included, the claim queue will have already pop()-ed the next item
-	// from the queue and the length would be `scheduling_lookahead - 1`.
-	<scheduler::Pallet<T>>::free_cores_and_fill_claim_queue(Vec::new(), now);
 	let config = configuration::ActiveConfig::<T>::get();
 	// Extra sanity, config should already never be smaller than 1:
 	let n_lookahead = config.scheduler_params.lookahead.max(1);
-
-	scheduler::ClaimQueue::<T>::get()
+	scheduler::Pallet::<T>::get_claim_queue()
 		.into_iter()
 		.map(|(core_index, entries)| {
-			// on cores timing out internal claim queue size may be temporarily longer than it
-			// should be as the timed out assignment might got pushed back to an already full claim
-			// queue:
 			(
 				core_index,
 				entries.into_iter().map(|e| e.para_id()).take(n_lookahead as usize).collect(),
diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs
index 445583d929aba9a82322f8fb94a70d38632a4f8f..329df3a8a9deb3c06a337d9cf3e4e43e55c100ae 100644
--- a/polkadot/runtime/parachains/src/scheduler.rs
+++ b/polkadot/runtime/parachains/src/scheduler.rs
@@ -36,14 +36,9 @@
 //! number of groups as availability cores. Validator groups will be assigned to different
 //! availability cores over time.
 
-use core::iter::Peekable;
-
 use crate::{configuration, initializer::SessionChangeNotification, paras};
 use alloc::{
-	collections::{
-		btree_map::{self, BTreeMap},
-		vec_deque::VecDeque,
-	},
+	collections::{btree_map::BTreeMap, btree_set::BTreeSet, vec_deque::VecDeque},
 	vec::Vec,
 };
 use frame_support::{pallet_prelude::*, traits::Defensive};
@@ -71,7 +66,7 @@ pub mod migration;
 pub mod pallet {
 	use super::*;
 
-	const STORAGE_VERSION: StorageVersion = StorageVersion::new(2);
+	const STORAGE_VERSION: StorageVersion = StorageVersion::new(3);
 
 	#[pallet::pallet]
 	#[pallet::without_storage_info]
@@ -93,47 +88,6 @@ pub mod pallet {
 	#[pallet::storage]
 	pub type ValidatorGroups<T> = StorageValue<_, Vec<Vec<ValidatorIndex>>, ValueQuery>;
 
-	/// One entry for each availability core. The i'th parachain belongs to the i'th core, with the
-	/// remaining cores all being on demand parachain multiplexers.
-	///
-	/// Bounded by the maximum of either of these two values:
-	///   * The number of parachains and parathread multiplexers
-	///   * The number of validators divided by `configuration.max_validators_per_core`.
-	#[pallet::storage]
-	pub type AvailabilityCores<T: Config> = StorageValue<_, Vec<CoreOccupiedType<T>>, ValueQuery>;
-
-	/// Representation of a core in `AvailabilityCores`.
-	///
-	/// This is not to be confused with `CoreState` which is an enriched variant of this and exposed
-	/// to the node side. It also provides information about scheduled/upcoming assignments for
-	/// example and is computed on the fly in the `availability_cores` runtime call.
-	#[derive(Encode, Decode, TypeInfo, RuntimeDebug, PartialEq)]
-	pub enum CoreOccupied<N> {
-		/// No candidate is waiting availability on this core right now (the core is not occupied).
-		Free,
-		/// A para is currently waiting for availability/inclusion on this core.
-		Paras(ParasEntry<N>),
-	}
-
-	/// Convenience type alias for `CoreOccupied`.
-	pub type CoreOccupiedType<T> = CoreOccupied<BlockNumberFor<T>>;
-
-	impl<N> CoreOccupied<N> {
-		/// Is core free?
-		pub fn is_free(&self) -> bool {
-			matches!(self, Self::Free)
-		}
-	}
-
-	/// Reasons a core might be freed.
-	#[derive(Clone, Copy)]
-	pub enum FreedReason {
-		/// The core's work concluded and the parablock assigned to it is considered available.
-		Concluded,
-		/// The core's work timed out.
-		TimedOut,
-	}
-
 	/// The block number where the session start occurred. Used to track how many group rotations
 	/// have occurred.
 	///
@@ -145,40 +99,9 @@ pub mod pallet {
 	pub type SessionStartBlock<T: Config> = StorageValue<_, BlockNumberFor<T>, ValueQuery>;
 
 	/// One entry for each availability core. The `VecDeque` represents the assignments to be
-	/// scheduled on that core. The value contained here will not be valid after the end of
-	/// a block. Runtime APIs should be used to determine scheduled cores for the upcoming block.
+	/// scheduled on that core.
 	#[pallet::storage]
-	pub type ClaimQueue<T: Config> =
-		StorageValue<_, BTreeMap<CoreIndex, VecDeque<ParasEntryType<T>>>, ValueQuery>;
-
-	/// Assignments as tracked in the claim queue.
-	#[derive(Encode, Decode, TypeInfo, RuntimeDebug, PartialEq, Clone)]
-	pub struct ParasEntry<N> {
-		/// The underlying [`Assignment`].
-		pub assignment: Assignment,
-		/// The number of times the entry has timed out in availability already.
-		pub availability_timeouts: u32,
-		/// The block height until this entry needs to be backed.
-		///
-		/// If missed the entry will be removed from the claim queue without ever having occupied
-		/// the core.
-		pub ttl: N,
-	}
-
-	/// Convenience type declaration for `ParasEntry`.
-	pub type ParasEntryType<T> = ParasEntry<BlockNumberFor<T>>;
-
-	impl<N> ParasEntry<N> {
-		/// Create a new `ParasEntry`.
-		pub fn new(assignment: Assignment, now: N) -> Self {
-			ParasEntry { assignment, availability_timeouts: 0, ttl: now }
-		}
-
-		/// Return `Id` from the underlying `Assignment`.
-		pub fn para_id(&self) -> ParaId {
-			self.assignment.para_id()
-		}
-	}
+	pub type ClaimQueue<T> = StorageValue<_, BTreeMap<CoreIndex, VecDeque<Assignment>>, ValueQuery>;
 
 	/// Availability timeout status of a core.
 	pub(crate) struct AvailabilityTimeoutStatus<BlockNumber> {
@@ -195,30 +118,6 @@ pub mod pallet {
 	}
 }
 
-type PositionInClaimQueue = u32;
-
-struct ClaimQueueIterator<E> {
-	next_idx: u32,
-	queue: Peekable<btree_map::IntoIter<CoreIndex, VecDeque<E>>>,
-}
-
-impl<E> Iterator for ClaimQueueIterator<E> {
-	type Item = (CoreIndex, VecDeque<E>);
-
-	fn next(&mut self) -> Option<Self::Item> {
-		let (idx, _) = self.queue.peek()?;
-		let val = if idx != &CoreIndex(self.next_idx) {
-			log::trace!(target: LOG_TARGET, "idx did not match claim queue idx: {:?} vs {:?}", idx, self.next_idx);
-			(CoreIndex(self.next_idx), VecDeque::new())
-		} else {
-			let (idx, q) = self.queue.next()?;
-			(idx, q)
-		};
-		self.next_idx += 1;
-		Some(val)
-	}
-}
-
 impl<T: Config> Pallet<T> {
 	/// Called by the initializer to initialize the scheduler pallet.
 	pub(crate) fn initializer_initialize(_now: BlockNumberFor<T>) -> Weight {
@@ -228,31 +127,22 @@ impl<T: Config> Pallet<T> {
 	/// Called by the initializer to finalize the scheduler pallet.
 	pub(crate) fn initializer_finalize() {}
 
-	/// Called before the initializer notifies of a new session.
-	pub(crate) fn pre_new_session() {
-		Self::push_claim_queue_items_to_assignment_provider();
-		Self::push_occupied_cores_to_assignment_provider();
-	}
-
 	/// Called by the initializer to note that a new session has started.
 	pub(crate) fn initializer_on_new_session(
 		notification: &SessionChangeNotification<BlockNumberFor<T>>,
 	) {
-		let SessionChangeNotification { validators, new_config, .. } = notification;
+		let SessionChangeNotification { validators, new_config, prev_config, .. } = notification;
 		let config = new_config;
+		let assigner_cores = config.scheduler_params.num_cores;
 
 		let n_cores = core::cmp::max(
-			T::AssignmentProvider::session_core_count(),
+			assigner_cores,
 			match config.scheduler_params.max_validators_per_core {
 				Some(x) if x != 0 => validators.len() as u32 / x,
 				_ => 0,
 			},
 		);
 
-		AvailabilityCores::<T>::mutate(|cores| {
-			cores.resize_with(n_cores as _, || CoreOccupied::Free);
-		});
-
 		// shuffle validators into groups.
 		if n_cores == 0 || validators.is_empty() {
 			ValidatorGroups::<T>::set(Vec::new());
@@ -295,151 +185,24 @@ impl<T: Config> Pallet<T> {
 			ValidatorGroups::<T>::set(groups);
 		}
 
+		// Resize and populate claim queue.
+		Self::maybe_resize_claim_queue(prev_config.scheduler_params.num_cores, assigner_cores);
+		Self::populate_claim_queue_after_session_change();
+
 		let now = frame_system::Pallet::<T>::block_number() + One::one();
 		SessionStartBlock::<T>::set(now);
 	}
 
-	/// Free unassigned cores. Provide a list of cores that should be considered newly-freed along
-	/// with the reason for them being freed. Returns a tuple of concluded and timedout paras.
-	fn free_cores(
-		just_freed_cores: impl IntoIterator<Item = (CoreIndex, FreedReason)>,
-	) -> (BTreeMap<CoreIndex, Assignment>, BTreeMap<CoreIndex, ParasEntryType<T>>) {
-		let mut timedout_paras: BTreeMap<CoreIndex, ParasEntryType<T>> = BTreeMap::new();
-		let mut concluded_paras = BTreeMap::new();
-
-		AvailabilityCores::<T>::mutate(|cores| {
-			let c_len = cores.len();
-
-			just_freed_cores
-				.into_iter()
-				.filter(|(freed_index, _)| (freed_index.0 as usize) < c_len)
-				.for_each(|(freed_index, freed_reason)| {
-					match core::mem::replace(&mut cores[freed_index.0 as usize], CoreOccupied::Free)
-					{
-						CoreOccupied::Free => {},
-						CoreOccupied::Paras(entry) => {
-							match freed_reason {
-								FreedReason::Concluded => {
-									concluded_paras.insert(freed_index, entry.assignment);
-								},
-								FreedReason::TimedOut => {
-									timedout_paras.insert(freed_index, entry);
-								},
-							};
-						},
-					};
-				})
-		});
-
-		(concluded_paras, timedout_paras)
-	}
-
-	/// Get an iterator into the claim queues.
-	///
-	/// This iterator will have an item for each and every core index up to the maximum core index
-	/// found in the claim queue. In other words there will be no holes/missing core indices,
-	/// between core 0 and the maximum, even if the claim queue was missing entries for particular
-	/// indices in between. (The iterator will return an empty `VecDeque` for those indices.
-	fn claim_queue_iterator() -> impl Iterator<Item = (CoreIndex, VecDeque<ParasEntryType<T>>)> {
-		let queues = ClaimQueue::<T>::get();
-		return ClaimQueueIterator::<ParasEntryType<T>> {
-			next_idx: 0,
-			queue: queues.into_iter().peekable(),
-		}
-	}
-
-	/// Note that the given cores have become occupied. Update the claim queue accordingly.
-	/// This will not push a new entry onto the claim queue, so the length after this call will be
-	/// the expected length - 1. The claim_queue runtime API will take care of adding another entry
-	/// here, to ensure the right lookahead.
-	pub(crate) fn occupied(
-		now_occupied: BTreeMap<CoreIndex, ParaId>,
-	) -> BTreeMap<CoreIndex, PositionInClaimQueue> {
-		let mut availability_cores = AvailabilityCores::<T>::get();
-
-		log::debug!(target: LOG_TARGET, "[occupied] now_occupied {:?}", now_occupied);
-
-		let pos_mapping: BTreeMap<CoreIndex, PositionInClaimQueue> = now_occupied
-			.iter()
-			.flat_map(|(core_idx, para_id)| {
-				match Self::remove_from_claim_queue(*core_idx, *para_id) {
-					Err(e) => {
-						log::debug!(
-							target: LOG_TARGET,
-							"[occupied] error on remove_from_claim queue {}",
-							e
-						);
-						None
-					},
-					Ok((pos_in_claim_queue, pe)) => {
-						availability_cores[core_idx.0 as usize] = CoreOccupied::Paras(pe);
-
-						Some((*core_idx, pos_in_claim_queue))
-					},
-				}
-			})
-			.collect();
-
-		// Drop expired claims after processing now_occupied.
-		Self::drop_expired_claims_from_claim_queue();
-
-		AvailabilityCores::<T>::set(availability_cores);
-
-		pos_mapping
-	}
-
-	/// Iterates through every element in all claim queues and tries to add new assignments from the
-	/// `AssignmentProvider`. A claim is considered expired if it's `ttl` field is lower than the
-	/// current block height.
-	fn drop_expired_claims_from_claim_queue() {
-		let now = frame_system::Pallet::<T>::block_number();
-		let availability_cores = AvailabilityCores::<T>::get();
-		let ttl = configuration::ActiveConfig::<T>::get().scheduler_params.ttl;
-
-		ClaimQueue::<T>::mutate(|cq| {
-			for (idx, _) in (0u32..).zip(availability_cores) {
-				let core_idx = CoreIndex(idx);
-				if let Some(core_claim_queue) = cq.get_mut(&core_idx) {
-					let mut i = 0;
-					let mut num_dropped = 0;
-					while i < core_claim_queue.len() {
-						let maybe_dropped = if let Some(entry) = core_claim_queue.get(i) {
-							if entry.ttl < now {
-								core_claim_queue.remove(i)
-							} else {
-								None
-							}
-						} else {
-							None
-						};
-
-						if let Some(dropped) = maybe_dropped {
-							num_dropped += 1;
-							T::AssignmentProvider::report_processed(dropped.assignment);
-						} else {
-							i += 1;
-						}
-					}
-
-					for _ in 0..num_dropped {
-						// For all claims dropped due to TTL, attempt to pop a new entry to
-						// the back of the claim queue.
-						if let Some(assignment) =
-							T::AssignmentProvider::pop_assignment_for_core(core_idx)
-						{
-							core_claim_queue.push_back(ParasEntry::new(assignment, now + ttl));
-						}
-					}
-				}
-			}
-		});
-	}
-
 	/// Get the validators in the given group, if the group index is valid for this session.
 	pub(crate) fn group_validators(group_index: GroupIndex) -> Option<Vec<ValidatorIndex>> {
 		ValidatorGroups::<T>::get().get(group_index.0 as usize).map(|g| g.clone())
 	}
 
+	/// Get the number of cores.
+	pub(crate) fn num_availability_cores() -> usize {
+		ValidatorGroups::<T>::decode_len().unwrap_or(0)
+	}
+
 	/// Get the group assigned to a specific core by index at the current block number. Result
 	/// undefined if the core index is unknown or the block number is less than the session start
 	/// index.
@@ -531,183 +294,137 @@ impl<T: Config> Pallet<T> {
 	/// Return the next thing that will be scheduled on this core assuming it is currently
 	/// occupied and the candidate occupying it became available.
 	pub(crate) fn next_up_on_available(core: CoreIndex) -> Option<ScheduledCore> {
-		ClaimQueue::<T>::get()
-			.get(&core)
-			.and_then(|a| a.front().map(|pe| Self::paras_entry_to_scheduled_core(pe)))
+		// Since this is being called from a runtime API, we need to workaround for #64.
+		if Self::on_chain_storage_version() == StorageVersion::new(2) {
+			migration::v2::ClaimQueue::<T>::get()
+				.get(&core)
+				.and_then(|a| a.front().map(|entry| entry.assignment.para_id()))
+		} else {
+			ClaimQueue::<T>::get()
+				.get(&core)
+				.and_then(|a| a.front().map(|assignment| assignment.para_id()))
+		}
+		.map(|para_id| ScheduledCore { para_id, collator: None })
 	}
 
-	fn paras_entry_to_scheduled_core(pe: &ParasEntryType<T>) -> ScheduledCore {
-		ScheduledCore { para_id: pe.para_id(), collator: None }
+	// Since this is being called from a runtime API, we need to workaround for #64.
+	pub(crate) fn get_claim_queue() -> BTreeMap<CoreIndex, VecDeque<Assignment>> {
+		if Self::on_chain_storage_version() == StorageVersion::new(2) {
+			migration::v2::ClaimQueue::<T>::get()
+				.into_iter()
+				.map(|(core_index, entries)| {
+					(core_index, entries.into_iter().map(|e| e.assignment).collect())
+				})
+				.collect()
+		} else {
+			ClaimQueue::<T>::get()
+		}
 	}
 
-	/// Return the next thing that will be scheduled on this core assuming it is currently
-	/// occupied and the candidate occupying it times out.
-	pub(crate) fn next_up_on_time_out(core: CoreIndex) -> Option<ScheduledCore> {
-		let max_availability_timeouts = configuration::ActiveConfig::<T>::get()
-			.scheduler_params
-			.max_availability_timeouts;
-		Self::next_up_on_available(core).or_else(|| {
-			// Or, if none, the claim currently occupying the core,
-			// as it would be put back on the queue after timing out if number of retries is not at
-			// the maximum.
-			let cores = AvailabilityCores::<T>::get();
-			cores.get(core.0 as usize).and_then(|c| match c {
-				CoreOccupied::Free => None,
-				CoreOccupied::Paras(pe) =>
-					if pe.availability_timeouts < max_availability_timeouts {
-						Some(Self::paras_entry_to_scheduled_core(pe))
-					} else {
-						None
-					},
-			})
-		})
-	}
+	/// For each core that isn't part of the `except_for` set, pop the first item of the claim queue
+	/// and fill the queue from the assignment provider.
+	pub(crate) fn advance_claim_queue(except_for: &BTreeSet<CoreIndex>) {
+		let config = configuration::ActiveConfig::<T>::get();
+		let num_assigner_cores = config.scheduler_params.num_cores;
+		// Extra sanity, config should already never be smaller than 1:
+		let n_lookahead = config.scheduler_params.lookahead.max(1);
+
+		for core_idx in 0..num_assigner_cores {
+			let core_idx = CoreIndex::from(core_idx);
+
+			if !except_for.contains(&core_idx) {
+				let core_idx = CoreIndex::from(core_idx);
 
-	/// Pushes occupied cores to the assignment provider.
-	fn push_occupied_cores_to_assignment_provider() {
-		AvailabilityCores::<T>::mutate(|cores| {
-			for core in cores.iter_mut() {
-				match core::mem::replace(core, CoreOccupied::Free) {
-					CoreOccupied::Free => continue,
-					CoreOccupied::Paras(entry) => {
-						Self::maybe_push_assignment(entry);
-					},
+				if let Some(dropped_para) = Self::pop_front_of_claim_queue(&core_idx) {
+					T::AssignmentProvider::report_processed(dropped_para);
 				}
-			}
-		});
-	}
 
-	// on new session
-	fn push_claim_queue_items_to_assignment_provider() {
-		for (_, claim_queue) in ClaimQueue::<T>::take() {
-			// Push back in reverse order so that when we pop from the provider again,
-			// the entries in the claim queue are in the same order as they are right now.
-			for para_entry in claim_queue.into_iter().rev() {
-				Self::maybe_push_assignment(para_entry);
+				Self::fill_claim_queue(core_idx, n_lookahead);
 			}
 		}
 	}
 
-	/// Push assignments back to the provider on session change unless the paras
-	/// timed out on availability before.
-	fn maybe_push_assignment(pe: ParasEntryType<T>) {
-		if pe.availability_timeouts == 0 {
-			T::AssignmentProvider::push_back_assignment(pe.assignment);
+	// on new session
+	fn maybe_resize_claim_queue(old_core_count: u32, new_core_count: u32) {
+		if new_core_count < old_core_count {
+			ClaimQueue::<T>::mutate(|cq| {
+				let to_remove: Vec<_> = cq
+					.range(CoreIndex(new_core_count)..CoreIndex(old_core_count))
+					.map(|(k, _)| *k)
+					.collect();
+				for key in to_remove {
+					if let Some(dropped_assignments) = cq.remove(&key) {
+						Self::push_back_to_assignment_provider(dropped_assignments.into_iter());
+					}
+				}
+			});
 		}
 	}
 
-	/// Frees cores and fills the free claim queue spots by popping from the `AssignmentProvider`.
-	pub fn free_cores_and_fill_claim_queue(
-		just_freed_cores: impl IntoIterator<Item = (CoreIndex, FreedReason)>,
-		now: BlockNumberFor<T>,
-	) {
-		let (mut concluded_paras, mut timedout_paras) = Self::free_cores(just_freed_cores);
-
-		// This can only happen on new sessions at which we move all assignments back to the
-		// provider. Hence, there's nothing we need to do here.
-		if ValidatorGroups::<T>::decode_len().map_or(true, |l| l == 0) {
-			return
-		}
-		let n_session_cores = T::AssignmentProvider::session_core_count();
-		let cq = ClaimQueue::<T>::get();
+	// Populate the claim queue. To be called on new session, after all the other modules were
+	// initialized.
+	fn populate_claim_queue_after_session_change() {
 		let config = configuration::ActiveConfig::<T>::get();
 		// Extra sanity, config should already never be smaller than 1:
 		let n_lookahead = config.scheduler_params.lookahead.max(1);
-		let max_availability_timeouts = config.scheduler_params.max_availability_timeouts;
-		let ttl = config.scheduler_params.ttl;
+		let new_core_count = config.scheduler_params.num_cores;
 
-		for core_idx in 0..n_session_cores {
+		for core_idx in 0..new_core_count {
 			let core_idx = CoreIndex::from(core_idx);
+			Self::fill_claim_queue(core_idx, n_lookahead);
+		}
+	}
 
-			let n_lookahead_used = cq.get(&core_idx).map_or(0, |v| v.len() as u32);
-
-			// add previously timedout paras back into the queue
-			if let Some(mut entry) = timedout_paras.remove(&core_idx) {
-				if entry.availability_timeouts < max_availability_timeouts {
-					// Increment the timeout counter.
-					entry.availability_timeouts += 1;
-					if n_lookahead_used < n_lookahead {
-						entry.ttl = now + ttl;
-					} else {
-						// Over max capacity, we need to bump ttl (we exceeded the claim queue
-						// size, so otherwise the entry might get dropped before reaching the top):
-						entry.ttl = now + ttl + One::one();
-					}
-					Self::add_to_claim_queue(core_idx, entry);
-					// The claim has been added back into the claim queue.
-					// Do not pop another assignment for the core.
-					continue
-				} else {
-					// Consider timed out assignments for on demand parachains as concluded for
-					// the assignment provider
-					let ret = concluded_paras.insert(core_idx, entry.assignment);
-					debug_assert!(ret.is_none());
+	/// Push some assignments back to the provider.
+	fn push_back_to_assignment_provider(
+		assignments: impl core::iter::DoubleEndedIterator<Item = Assignment>,
+	) {
+		// Push back in reverse order so that when we pop from the provider again,
+		// the entries in the claim queue are in the same order as they are right
+		// now.
+		for assignment in assignments.rev() {
+			T::AssignmentProvider::push_back_assignment(assignment);
+		}
+	}
+
+	fn fill_claim_queue(core_idx: CoreIndex, n_lookahead: u32) {
+		ClaimQueue::<T>::mutate(|la| {
+			let cq = la.entry(core_idx).or_default();
+
+			let mut n_lookahead_used = cq.len() as u32;
+
+			// If the claim queue used to be empty, we need to double the first assignment.
+			// Otherwise, the para would only be able to get its collation included right at the
+			// next block (synchronous backing).
+			// Only do this if the configured lookahead is greater than 1. Otherwise, it doesn't
+			// make sense.
+			if n_lookahead_used == 0 && n_lookahead > 1 {
+				if let Some(assignment) = T::AssignmentProvider::pop_assignment_for_core(core_idx) {
+					T::AssignmentProvider::assignment_duplicated(&assignment);
+					cq.push_back(assignment.clone());
+					cq.push_back(assignment);
+					n_lookahead_used += 2;
 				}
 			}
 
-			if let Some(concluded_para) = concluded_paras.remove(&core_idx) {
-				T::AssignmentProvider::report_processed(concluded_para);
-			}
 			for _ in n_lookahead_used..n_lookahead {
 				if let Some(assignment) = T::AssignmentProvider::pop_assignment_for_core(core_idx) {
-					Self::add_to_claim_queue(core_idx, ParasEntry::new(assignment, now + ttl));
+					cq.push_back(assignment);
+				} else {
+					break
 				}
 			}
-		}
 
-		debug_assert!(timedout_paras.is_empty());
-		debug_assert!(concluded_paras.is_empty());
-	}
-
-	fn add_to_claim_queue(core_idx: CoreIndex, pe: ParasEntryType<T>) {
-		ClaimQueue::<T>::mutate(|la| {
-			la.entry(core_idx).or_default().push_back(pe);
+			// If we didn't end up pushing anything, remove the entry. We don't want to waste the
+			// space if we've no assignments.
+			if cq.is_empty() {
+				la.remove(&core_idx);
+			}
 		});
 	}
 
-	/// Returns `ParasEntry` with `para_id` at `core_idx` if found.
-	fn remove_from_claim_queue(
-		core_idx: CoreIndex,
-		para_id: ParaId,
-	) -> Result<(PositionInClaimQueue, ParasEntryType<T>), &'static str> {
-		ClaimQueue::<T>::mutate(|cq| {
-			let core_claims = cq.get_mut(&core_idx).ok_or("core_idx not found in lookahead")?;
-
-			let pos = core_claims
-				.iter()
-				.position(|pe| pe.para_id() == para_id)
-				.ok_or("para id not found at core_idx lookahead")?;
-
-			let pe = core_claims.remove(pos).ok_or("remove returned None")?;
-
-			Ok((pos as u32, pe))
-		})
-	}
-
-	/// Paras scheduled next in the claim queue.
-	pub(crate) fn scheduled_paras() -> impl Iterator<Item = (CoreIndex, ParaId)> {
-		let claim_queue = ClaimQueue::<T>::get();
-		claim_queue
-			.into_iter()
-			.filter_map(|(core_idx, v)| v.front().map(|e| (core_idx, e.assignment.para_id())))
-	}
-
-	/// Paras that may get backed on cores.
-	///
-	/// 1. The para must be scheduled on core.
-	/// 2. Core needs to be free, otherwise backing is not possible.
-	pub(crate) fn eligible_paras() -> impl Iterator<Item = (CoreIndex, ParaId)> {
-		let availability_cores = AvailabilityCores::<T>::get();
-
-		Self::claim_queue_iterator().zip(availability_cores.into_iter()).filter_map(
-			|((core_idx, queue), core)| {
-				if core != CoreOccupied::Free {
-					return None
-				}
-				let next_scheduled = queue.front()?;
-				Some((core_idx, next_scheduled.assignment.para_id()))
-			},
-		)
+	fn pop_front_of_claim_queue(core_idx: &CoreIndex) -> Option<Assignment> {
+		ClaimQueue::<T>::mutate(|cq| cq.get_mut(core_idx)?.pop_front())
 	}
 
 	#[cfg(any(feature = "try-runtime", test))]
@@ -726,7 +443,7 @@ impl<T: Config> Pallet<T> {
 	}
 
 	#[cfg(test)]
-	pub(crate) fn set_claim_queue(claim_queue: BTreeMap<CoreIndex, VecDeque<ParasEntryType<T>>>) {
+	pub(crate) fn set_claim_queue(claim_queue: BTreeMap<CoreIndex, VecDeque<Assignment>>) {
 		ClaimQueue::<T>::set(claim_queue);
 	}
 }
diff --git a/polkadot/runtime/parachains/src/scheduler/common.rs b/polkadot/runtime/parachains/src/scheduler/common.rs
index 114cd4b940bcdbeacdf0cbb7f62e74fc09dbad4d..bf8a2bee74e3c96e8b6771066c0eb8cbf4f02be4 100644
--- a/polkadot/runtime/parachains/src/scheduler/common.rs
+++ b/polkadot/runtime/parachains/src/scheduler/common.rs
@@ -77,11 +77,6 @@ pub trait AssignmentProvider<BlockNumber> {
 	#[cfg(any(feature = "runtime-benchmarks", test))]
 	fn get_mock_assignment(core_idx: CoreIndex, para_id: ParaId) -> Assignment;
 
-	/// How many cores are allocated to this provider.
-	///
-	/// As the name suggests the core count has to be session buffered:
-	///
-	/// - Core count has to be predetermined for the next session in the current session.
-	/// - Core count must not change during a session.
-	fn session_core_count() -> u32;
+	/// Report that an assignment was duplicated by the scheduler.
+	fn assignment_duplicated(assignment: &Assignment);
 }
diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs
index 125f105ef70668c8ebacfdb9e2b267b9d00bfcbf..e741711cad6d899557ea87788b4eec996784c330 100644
--- a/polkadot/runtime/parachains/src/scheduler/migration.rs
+++ b/polkadot/runtime/parachains/src/scheduler/migration.rs
@@ -268,7 +268,7 @@ pub type MigrateV0ToV1<T> = VersionedMigration<
 	<T as frame_system::Config>::DbWeight,
 >;
 
-mod v2 {
+pub(crate) mod v2 {
 	use super::*;
 	use crate::scheduler;
 
@@ -406,3 +406,89 @@ pub type MigrateV1ToV2<T> = VersionedMigration<
 	Pallet<T>,
 	<T as frame_system::Config>::DbWeight,
 >;
+
+/// Migration for TTL and availability timeout retries removal.
+/// AvailabilityCores storage is removed and ClaimQueue now holds `Assignment`s instead of
+/// `ParasEntryType`.
+mod v3 {
+	use super::*;
+	use crate::scheduler;
+
+	#[storage_alias]
+	pub(crate) type ClaimQueue<T: Config> =
+		StorageValue<Pallet<T>, BTreeMap<CoreIndex, VecDeque<Assignment>>, ValueQuery>;
+	/// Migration to V3
+	pub struct UncheckedMigrateToV3<T>(core::marker::PhantomData<T>);
+
+	impl<T: Config> UncheckedOnRuntimeUpgrade for UncheckedMigrateToV3<T> {
+		fn on_runtime_upgrade() -> Weight {
+			let mut weight: Weight = Weight::zero();
+
+			// Migrate ClaimQueue to new format.
+
+			let old = v2::ClaimQueue::<T>::take();
+			let new = old
+				.into_iter()
+				.map(|(k, v)| {
+					(
+						k,
+						v.into_iter()
+							.map(|paras_entry| paras_entry.assignment)
+							.collect::<VecDeque<_>>(),
+					)
+				})
+				.collect::<BTreeMap<CoreIndex, VecDeque<Assignment>>>();
+
+			v3::ClaimQueue::<T>::put(new);
+
+			// Clear AvailabilityCores storage
+			v2::AvailabilityCores::<T>::kill();
+
+			weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2));
+
+			log::info!(target: scheduler::LOG_TARGET, "Migrating para scheduler storage to v3");
+
+			weight
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::DispatchError> {
+			log::trace!(
+				target: crate::scheduler::LOG_TARGET,
+				"ClaimQueue before migration: {}",
+				v2::ClaimQueue::<T>::get().len()
+			);
+
+			let bytes = u32::to_be_bytes(v2::ClaimQueue::<T>::get().len() as u32);
+
+			Ok(bytes.to_vec())
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn post_upgrade(state: Vec<u8>) -> Result<(), sp_runtime::DispatchError> {
+			log::trace!(target: crate::scheduler::LOG_TARGET, "Running post_upgrade()");
+
+			let old_len = u32::from_be_bytes(state.try_into().unwrap());
+			ensure!(
+				v3::ClaimQueue::<T>::get().len() as u32 == old_len,
+				"Old ClaimQueue was not completely moved to the new ClaimQueue after migration"
+			);
+
+			ensure!(
+				!v2::AvailabilityCores::<T>::exists(),
+				"AvailabilityCores storage should have been completely killed"
+			);
+
+			Ok(())
+		}
+	}
+}
+
+/// Migrate `V2` to `V3` of the storage format.
+pub type MigrateV2ToV3<T> = VersionedMigration<
+	2,
+	3,
+	v3::UncheckedMigrateToV3<T>,
+	Pallet<T>,
+	<T as frame_system::Config>::DbWeight,
+>;
diff --git a/polkadot/runtime/parachains/src/scheduler/tests.rs b/polkadot/runtime/parachains/src/scheduler/tests.rs
index 5f80114b596368135e91ca59ca9c7e6f0b59510f..5be7e084f3bca3588301fa3970d0873fe83ba687 100644
--- a/polkadot/runtime/parachains/src/scheduler/tests.rs
+++ b/polkadot/runtime/parachains/src/scheduler/tests.rs
@@ -16,7 +16,7 @@
 
 use super::*;
 
-use alloc::collections::{btree_map::BTreeMap, btree_set::BTreeSet};
+use alloc::collections::btree_map::BTreeMap;
 use frame_support::assert_ok;
 use polkadot_primitives::{
 	BlockNumber, SchedulerParams, SessionIndex, ValidationCode, ValidatorId,
@@ -27,14 +27,14 @@ use crate::{
 	configuration::HostConfiguration,
 	initializer::SessionChangeNotification,
 	mock::{
-		new_test_ext, MockAssigner, MockGenesisConfig, Paras, ParasShared, RuntimeOrigin,
-		Scheduler, System, Test,
+		new_test_ext, Configuration, MockAssigner, MockGenesisConfig, Paras, ParasShared,
+		RuntimeOrigin, Scheduler, System, Test,
 	},
 	paras::{ParaGenesisArgs, ParaKind},
 	scheduler::{self, common::Assignment, ClaimQueue},
 };
 
-fn schedule_blank_para(id: ParaId) {
+fn register_para(id: ParaId) {
 	let validation_code: ValidationCode = vec![1, 2, 3].into();
 	assert_ok!(Paras::schedule_para_initialize(
 		id,
@@ -58,17 +58,18 @@ fn run_to_block(
 		Scheduler::initializer_finalize();
 		Paras::initializer_finalize(b);
 
-		if let Some(notification) = new_session(b + 1) {
-			let mut notification_with_session_index = notification;
+		if let Some(mut notification) = new_session(b + 1) {
 			// We will make every session change trigger an action queue. Normally this may require
 			// 2 or more session changes.
-			if notification_with_session_index.session_index == SessionIndex::default() {
-				notification_with_session_index.session_index = ParasShared::scheduled_session();
+			if notification.session_index == SessionIndex::default() {
+				notification.session_index = ParasShared::scheduled_session();
 			}
-			Scheduler::pre_new_session();
 
-			Paras::initializer_on_new_session(&notification_with_session_index);
-			Scheduler::initializer_on_new_session(&notification_with_session_index);
+			Configuration::force_set_active_config(notification.new_config.clone());
+
+			Paras::initializer_on_new_session(&notification);
+
+			Scheduler::initializer_on_new_session(&notification);
 		}
 
 		System::on_finalize(b);
@@ -79,28 +80,8 @@ fn run_to_block(
 		Paras::initializer_initialize(b + 1);
 		Scheduler::initializer_initialize(b + 1);
 
-		// In the real runtime this is expected to be called by the `InclusionInherent` pallet.
-		Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1);
-	}
-}
-
-fn run_to_end_of_block(
-	to: BlockNumber,
-	new_session: impl Fn(BlockNumber) -> Option<SessionChangeNotification<BlockNumber>>,
-) {
-	run_to_block(to, &new_session);
-
-	Scheduler::initializer_finalize();
-	Paras::initializer_finalize(to);
-
-	if let Some(notification) = new_session(to + 1) {
-		Scheduler::pre_new_session();
-
-		Paras::initializer_on_new_session(&notification);
-		Scheduler::initializer_on_new_session(&notification);
+		Scheduler::advance_claim_queue(&Default::default());
 	}
-
-	System::on_finalize(to);
 }
 
 fn default_config() -> HostConfiguration<BlockNumber> {
@@ -110,6 +91,7 @@ fn default_config() -> HostConfiguration<BlockNumber> {
 		// `minimum_validation_upgrade_delay` is greater than `chain_availability_period` and
 		// `thread_availability_period`.
 		minimum_validation_upgrade_delay: 6,
+		#[allow(deprecated)]
 		scheduler_params: SchedulerParams {
 			group_rotation_frequency: 10,
 			paras_availability_period: 3,
@@ -129,172 +111,27 @@ fn genesis_config(config: &HostConfiguration<BlockNumber>) -> MockGenesisConfig
 	}
 }
 
-fn claimqueue_contains_para_ids<T: Config>(pids: Vec<ParaId>) -> bool {
-	let set: BTreeSet<ParaId> = ClaimQueue::<T>::get()
+/// Internal access to assignments at the top of the claim queue.
+fn next_assignments() -> impl Iterator<Item = (CoreIndex, Assignment)> {
+	let claim_queue = ClaimQueue::<Test>::get();
+	claim_queue
 		.into_iter()
-		.flat_map(|(_, paras_entries)| paras_entries.into_iter().map(|pe| pe.assignment.para_id()))
-		.collect();
-
-	pids.into_iter().all(|pid| set.contains(&pid))
-}
-
-fn availability_cores_contains_para_ids<T: Config>(pids: Vec<ParaId>) -> bool {
-	let set: BTreeSet<ParaId> = AvailabilityCores::<T>::get()
-		.into_iter()
-		.filter_map(|core| match core {
-			CoreOccupied::Free => None,
-			CoreOccupied::Paras(entry) => Some(entry.para_id()),
-		})
-		.collect();
-
-	pids.into_iter().all(|pid| set.contains(&pid))
-}
-
-/// Internal access to entries at the top of the claim queue.
-fn scheduled_entries() -> impl Iterator<Item = (CoreIndex, ParasEntry<BlockNumberFor<Test>>)> {
-	let claimqueue = ClaimQueue::<Test>::get();
-	claimqueue
-		.into_iter()
-		.filter_map(|(core_idx, v)| v.front().map(|e| (core_idx, e.clone())))
-}
-
-#[test]
-fn claim_queue_iterator_handles_holes_correctly() {
-	let mut queue = BTreeMap::new();
-	queue.insert(CoreIndex(1), ["abc"].into_iter().collect());
-	queue.insert(CoreIndex(4), ["cde"].into_iter().collect());
-	let queue = queue.into_iter().peekable();
-	let mut i = ClaimQueueIterator { next_idx: 0, queue };
-
-	let (idx, e) = i.next().unwrap();
-	assert_eq!(idx, CoreIndex(0));
-	assert!(e.is_empty());
-
-	let (idx, e) = i.next().unwrap();
-	assert_eq!(idx, CoreIndex(1));
-	assert!(e.len() == 1);
-
-	let (idx, e) = i.next().unwrap();
-	assert_eq!(idx, CoreIndex(2));
-	assert!(e.is_empty());
-
-	let (idx, e) = i.next().unwrap();
-	assert_eq!(idx, CoreIndex(3));
-	assert!(e.is_empty());
-
-	let (idx, e) = i.next().unwrap();
-	assert_eq!(idx, CoreIndex(4));
-	assert!(e.len() == 1);
-
-	assert!(i.next().is_none());
+		.filter_map(|(core_idx, v)| v.front().map(|a| (core_idx, a.clone())))
 }
 
 #[test]
-fn claimqueue_ttl_drop_fn_works() {
+fn session_change_shuffles_validators() {
 	let mut config = default_config();
-	config.scheduler_params.lookahead = 3;
+	// Need five cores for this test
+	config.scheduler_params.num_cores = 5;
 	let genesis_config = genesis_config(&config);
 
-	let para_id = ParaId::from(100);
-	let core_idx = CoreIndex::from(0);
-	let mut now = 10;
-
 	new_test_ext(genesis_config).execute_with(|| {
-		assert!(config.scheduler_params.ttl == 5);
-		// Register and run to a blockheight where the para is in a valid state.
-		schedule_blank_para(para_id);
-		run_to_block(now, |n| if n == now { Some(Default::default()) } else { None });
-
-		// Add a claim on core 0 with a ttl in the past.
-		let paras_entry = ParasEntry::new(Assignment::Bulk(para_id), now - 5 as u32);
-		Scheduler::add_to_claim_queue(core_idx, paras_entry.clone());
-
-		// Claim is in queue prior to call.
-		assert!(claimqueue_contains_para_ids::<Test>(vec![para_id]));
-
-		// Claim is dropped post call.
-		Scheduler::drop_expired_claims_from_claim_queue();
-		assert!(!claimqueue_contains_para_ids::<Test>(vec![para_id]));
-
-		// Add a claim on core 0 with a ttl in the future (15).
-		let paras_entry = ParasEntry::new(Assignment::Bulk(para_id), now + 5);
-		Scheduler::add_to_claim_queue(core_idx, paras_entry.clone());
-
-		// Claim is in queue post call.
-		Scheduler::drop_expired_claims_from_claim_queue();
-		assert!(claimqueue_contains_para_ids::<Test>(vec![para_id]));
-
-		now = now + 6;
-		run_to_block(now, |_| None);
-
-		// Claim is dropped
-		Scheduler::drop_expired_claims_from_claim_queue();
-		assert!(!claimqueue_contains_para_ids::<Test>(vec![para_id]));
+		assert!(ValidatorGroups::<Test>::get().is_empty());
 
-		// Add a claim on core 0 with a ttl == now (16)
-		let paras_entry = ParasEntry::new(Assignment::Bulk(para_id), now);
-		Scheduler::add_to_claim_queue(core_idx, paras_entry.clone());
-
-		// Claim is in queue post call.
-		Scheduler::drop_expired_claims_from_claim_queue();
-		assert!(claimqueue_contains_para_ids::<Test>(vec![para_id]));
-
-		now = now + 1;
-		run_to_block(now, |_| None);
-
-		// Drop expired claim.
-		Scheduler::drop_expired_claims_from_claim_queue();
-		assert!(!claimqueue_contains_para_ids::<Test>(vec![para_id]));
-
-		// Add a claim on core 0 with a ttl == now (17)
-		let paras_entry_non_expired = ParasEntry::new(Assignment::Bulk(para_id), now);
-		let paras_entry_expired = ParasEntry::new(Assignment::Bulk(para_id), now - 2);
-		// ttls = [17, 15, 17]
-		Scheduler::add_to_claim_queue(core_idx, paras_entry_non_expired.clone());
-		Scheduler::add_to_claim_queue(core_idx, paras_entry_expired.clone());
-		Scheduler::add_to_claim_queue(core_idx, paras_entry_non_expired.clone());
-		let cq = scheduler::ClaimQueue::<Test>::get();
-		assert_eq!(cq.get(&core_idx).unwrap().len(), 3);
-
-		// Add a claim to the test assignment provider.
-		let assignment = Assignment::Bulk(para_id);
-
-		MockAssigner::add_test_assignment(assignment.clone());
-
-		// Drop expired claim.
-		Scheduler::drop_expired_claims_from_claim_queue();
-
-		let cq = scheduler::ClaimQueue::<Test>::get();
-		let cqc = cq.get(&core_idx).unwrap();
-		// Same number of claims, because a new claim is popped from `MockAssigner` instead of the
-		// expired one
-		assert_eq!(cqc.len(), 3);
-
-		// The first 2 claims in the queue should have a ttl of 17,
-		// being the ones set up prior in this test as claims 1 and 3.
-		// The third claim is popped from the assignment provider and
-		// has a new ttl set by the scheduler of now +
-		// assignment_provider_ttl. ttls = [17, 17, 22]
-		assert!(cqc.iter().enumerate().all(|(index, entry)| {
-			match index {
-				0 | 1 => entry.clone().ttl == 17,
-				2 => entry.clone().ttl == 22,
-				_ => false,
-			}
-		}))
-	});
-}
-
-#[test]
-fn session_change_shuffles_validators() {
-	let genesis_config = genesis_config(&default_config());
-
-	new_test_ext(genesis_config).execute_with(|| {
-		// Need five cores for this test
-		MockAssigner::set_core_count(5);
 		run_to_block(1, |number| match number {
 			1 => Some(SessionChangeNotification {
-				new_config: default_config(),
+				new_config: config.clone(),
 				validators: vec![
 					ValidatorId::from(Sr25519Keyring::Alice.public()),
 					ValidatorId::from(Sr25519Keyring::Bob.public()),
@@ -328,6 +165,8 @@ fn session_change_shuffles_validators() {
 fn session_change_takes_only_max_per_core() {
 	let config = {
 		let mut config = default_config();
+		// Simulate 2 cores shared across all usage types
+		config.scheduler_params.num_cores = 2;
 		config.scheduler_params.max_validators_per_core = Some(1);
 		config
 	};
@@ -335,9 +174,6 @@ fn session_change_takes_only_max_per_core() {
 	let genesis_config = genesis_config(&config);
 
 	new_test_ext(genesis_config).execute_with(|| {
-		// Simulate 2 cores between all usage types
-		MockAssigner::set_core_count(2);
-
 		run_to_block(1, |number| match number {
 			1 => Some(SessionChangeNotification {
 				new_config: config.clone(),
@@ -367,8 +203,12 @@ fn session_change_takes_only_max_per_core() {
 }
 
 #[test]
-fn fill_claimqueue_fills() {
-	let config = default_config();
+// Test that `advance_claim_queue` doubles the first assignment only for a core that didn't use to
+// have any assignments.
+fn advance_claim_queue_doubles_assignment_only_if_empty() {
+	let mut config = default_config();
+	config.scheduler_params.lookahead = 3;
+	config.scheduler_params.num_cores = 2;
 	let genesis_config = genesis_config(&config);
 
 	let para_a = ParaId::from(3_u32);
@@ -380,18 +220,15 @@ fn fill_claimqueue_fills() {
 	let assignment_c = Assignment::Bulk(para_c);
 
 	new_test_ext(genesis_config).execute_with(|| {
-		MockAssigner::set_core_count(2);
-		let coretime_ttl = config.scheduler_params.ttl;
-
 		// Add 3 paras
-		schedule_blank_para(para_a);
-		schedule_blank_para(para_b);
-		schedule_blank_para(para_c);
+		register_para(para_a);
+		register_para(para_b);
+		register_para(para_c);
 
 		// start a new session to activate, 2 validators for 2 cores.
 		run_to_block(1, |number| match number {
 			1 => Some(SessionChangeNotification {
-				new_config: default_config(),
+				new_config: config.clone(),
 				validators: vec![
 					ValidatorId::from(Sr25519Keyring::Alice.public()),
 					ValidatorId::from(Sr25519Keyring::Bob.public()),
@@ -406,224 +243,108 @@ fn fill_claimqueue_fills() {
 		MockAssigner::add_test_assignment(assignment_b.clone());
 		MockAssigner::add_test_assignment(assignment_c.clone());
 
+		// This will call advance_claim_queue
 		run_to_block(2, |_| None);
 
 		{
-			assert_eq!(Scheduler::claim_queue_len(), 3);
-			let scheduled: BTreeMap<_, _> = scheduled_entries().collect();
+			assert_eq!(Scheduler::claim_queue_len(), 5);
+			let mut claim_queue = scheduler::ClaimQueue::<Test>::get();
 
-			// Was added a block later, note the TTL.
-			assert_eq!(
-				scheduled.get(&CoreIndex(0)).unwrap(),
-				&ParasEntry {
-					assignment: assignment_a.clone(),
-					availability_timeouts: 0,
-					ttl: 2 + coretime_ttl
-				},
-			);
-			// Sits on the same core as `para_a`
+			// Because the claim queue used to be empty, the first assignment is doubled for every
+			// core so that the first para gets a fair shot at backing something.
 			assert_eq!(
-				scheduler::ClaimQueue::<Test>::get().get(&CoreIndex(0)).unwrap()[1],
-				ParasEntry {
-					assignment: assignment_b.clone(),
-					availability_timeouts: 0,
-					ttl: 2 + coretime_ttl
-				}
+				claim_queue.remove(&CoreIndex(0)).unwrap(),
+				[assignment_a.clone(), assignment_a, assignment_b]
+					.into_iter()
+					.collect::<VecDeque<_>>()
 			);
 			assert_eq!(
-				scheduled.get(&CoreIndex(1)).unwrap(),
-				&ParasEntry {
-					assignment: assignment_c.clone(),
-					availability_timeouts: 0,
-					ttl: 2 + coretime_ttl
-				},
+				claim_queue.remove(&CoreIndex(1)).unwrap(),
+				[assignment_c.clone(), assignment_c].into_iter().collect::<VecDeque<_>>()
 			);
 		}
 	});
 }
 
 #[test]
-fn schedule_schedules_including_just_freed() {
+// Test that `advance_claim_queue` doesn't populate for cores which have no assignments.
+fn advance_claim_queue_no_entry_if_empty() {
 	let mut config = default_config();
-	// NOTE: This test expects on demand cores to each get slotted on to a different core
-	// and not fill up the claimqueue of each core first.
-	config.scheduler_params.lookahead = 1;
+	config.scheduler_params.lookahead = 3;
+	config.scheduler_params.num_cores = 2;
 	let genesis_config = genesis_config(&config);
 
 	let para_a = ParaId::from(3_u32);
-	let para_b = ParaId::from(4_u32);
-	let para_c = ParaId::from(5_u32);
-	let para_d = ParaId::from(6_u32);
-	let para_e = ParaId::from(7_u32);
-
 	let assignment_a = Assignment::Bulk(para_a);
-	let assignment_b = Assignment::Bulk(para_b);
-	let assignment_c = Assignment::Bulk(para_c);
-	let assignment_d = Assignment::Bulk(para_d);
-	let assignment_e = Assignment::Bulk(para_e);
 
 	new_test_ext(genesis_config).execute_with(|| {
-		MockAssigner::set_core_count(3);
-
-		// add 5 paras
-		schedule_blank_para(para_a);
-		schedule_blank_para(para_b);
-		schedule_blank_para(para_c);
-		schedule_blank_para(para_d);
-		schedule_blank_para(para_e);
+		// Add 1 para
+		register_para(para_a);
 
-		// start a new session to activate, 3 validators for 3 cores.
+		// start a new session to activate, 2 validators for 2 cores.
 		run_to_block(1, |number| match number {
 			1 => Some(SessionChangeNotification {
-				new_config: default_config(),
+				new_config: config.clone(),
 				validators: vec![
 					ValidatorId::from(Sr25519Keyring::Alice.public()),
 					ValidatorId::from(Sr25519Keyring::Bob.public()),
-					ValidatorId::from(Sr25519Keyring::Charlie.public()),
 				],
 				..Default::default()
 			}),
 			_ => None,
 		});
 
-		// add a couple of para claims now that paras are live
-		MockAssigner::add_test_assignment(assignment_a.clone());
-		MockAssigner::add_test_assignment(assignment_c.clone());
-
-		let mut now = 2;
-		run_to_block(now, |_| None);
-
-		assert_eq!(Scheduler::scheduled_paras().collect::<Vec<_>>().len(), 2);
-
-		// cores 0, 1 should be occupied. mark them as such.
-		let mut occupied_map: BTreeMap<CoreIndex, ParaId> = BTreeMap::new();
-		occupied_map.insert(CoreIndex(0), para_a);
-		occupied_map.insert(CoreIndex(1), para_c);
-		Scheduler::occupied(occupied_map);
-
-		{
-			let cores = AvailabilityCores::<Test>::get();
-
-			// cores 0, 1 are `CoreOccupied::Paras(ParasEntry...)`
-			assert!(cores[0] != CoreOccupied::Free);
-			assert!(cores[1] != CoreOccupied::Free);
-
-			// core 2 is free
-			assert!(cores[2] == CoreOccupied::Free);
-
-			assert!(Scheduler::scheduled_paras().collect::<Vec<_>>().is_empty());
-
-			// All `core_queue`s should be empty
-			scheduler::ClaimQueue::<Test>::get()
-				.iter()
-				.for_each(|(_core_idx, core_queue)| assert_eq!(core_queue.len(), 0))
-		}
-
 		MockAssigner::add_test_assignment(assignment_a.clone());
-		MockAssigner::add_test_assignment(assignment_c.clone());
-		MockAssigner::add_test_assignment(assignment_b.clone());
-		MockAssigner::add_test_assignment(assignment_d.clone());
-		MockAssigner::add_test_assignment(assignment_e.clone());
-		now = 3;
-		run_to_block(now, |_| None);
 
-		{
-			let scheduled: BTreeMap<_, _> = scheduled_entries().collect();
-
-			assert_eq!(scheduled.len(), 3);
-			assert_eq!(
-				scheduled.get(&CoreIndex(2)).unwrap(),
-				&ParasEntry {
-					assignment: Assignment::Bulk(para_b),
-					availability_timeouts: 0,
-					ttl: 8
-				},
-			);
-		}
-
-		// now note that cores 0 and 1 were freed.
-		let just_updated: BTreeMap<CoreIndex, FreedReason> = vec![
-			(CoreIndex(0), FreedReason::Concluded),
-			(CoreIndex(1), FreedReason::TimedOut), // should go back on queue.
-		]
-		.into_iter()
-		.collect();
-		Scheduler::free_cores_and_fill_claim_queue(just_updated, now);
+		// This will call advance_claim_queue
+		run_to_block(3, |_| None);
 
 		{
-			let scheduled: BTreeMap<_, _> = scheduled_entries().collect();
+			let mut claim_queue = scheduler::ClaimQueue::<Test>::get();
 
-			// 1 thing scheduled before, + 2 cores freed.
-			assert_eq!(scheduled.len(), 3);
-			assert_eq!(
-				scheduled.get(&CoreIndex(0)).unwrap(),
-				&ParasEntry {
-					// Next entry in queue is `a` again:
-					assignment: Assignment::Bulk(para_a),
-					availability_timeouts: 0,
-					ttl: 8
-				},
-			);
-			// Although C was descheduled, the core `2` was occupied so C goes back to the queue.
 			assert_eq!(
-				scheduler::ClaimQueue::<Test>::get()[&CoreIndex(1)][1],
-				ParasEntry {
-					assignment: Assignment::Bulk(para_c),
-					// End of the queue should be the pushed back entry:
-					availability_timeouts: 1,
-					// ttl 1 higher:
-					ttl: 9
-				},
-			);
-			assert_eq!(
-				scheduled.get(&CoreIndex(1)).unwrap(),
-				&ParasEntry {
-					assignment: Assignment::Bulk(para_c),
-					availability_timeouts: 0,
-					ttl: 8
-				},
-			);
-			assert_eq!(
-				scheduled.get(&CoreIndex(2)).unwrap(),
-				&ParasEntry {
-					assignment: Assignment::Bulk(para_b),
-					availability_timeouts: 0,
-					ttl: 8
-				},
+				claim_queue.remove(&CoreIndex(0)).unwrap(),
+				[assignment_a].into_iter().collect::<VecDeque<_>>()
 			);
 
-			assert!(claimqueue_contains_para_ids::<Test>(vec![para_c]));
-			assert!(!availability_cores_contains_para_ids::<Test>(vec![para_a, para_c]));
+			// Even though core 1 exists, there's no assignment for it so it's not present in the
+			// claim queue.
+			assert!(claim_queue.remove(&CoreIndex(1)).is_none());
 		}
 	});
 }
 
 #[test]
-fn schedule_clears_availability_cores() {
+// Test that `advance_claim_queue` only advances for cores that are not part of the `except_for`
+// set.
+fn advance_claim_queue_except_for() {
 	let mut config = default_config();
+	// NOTE: This test expects on demand cores to each get slotted onto a different core
+	// and not fill up the claimqueue of each core first.
 	config.scheduler_params.lookahead = 1;
+	config.scheduler_params.num_cores = 3;
+
 	let genesis_config = genesis_config(&config);
 
 	let para_a = ParaId::from(1_u32);
 	let para_b = ParaId::from(2_u32);
 	let para_c = ParaId::from(3_u32);
+	let para_d = ParaId::from(4_u32);
+	let para_e = ParaId::from(5_u32);
 
 	let assignment_a = Assignment::Bulk(para_a);
 	let assignment_b = Assignment::Bulk(para_b);
 	let assignment_c = Assignment::Bulk(para_c);
+	let assignment_d = Assignment::Bulk(para_d);
+	let assignment_e = Assignment::Bulk(para_e);
 
 	new_test_ext(genesis_config).execute_with(|| {
-		MockAssigner::set_core_count(3);
-
-		// register 3 paras
-		schedule_blank_para(para_a);
-		schedule_blank_para(para_b);
-		schedule_blank_para(para_c);
-
-		// Adding assignments then running block to populate claim queue
-		MockAssigner::add_test_assignment(assignment_a.clone());
-		MockAssigner::add_test_assignment(assignment_b.clone());
-		MockAssigner::add_test_assignment(assignment_c.clone());
+		// add 5 paras
+		register_para(para_a);
+		register_para(para_b);
+		register_para(para_c);
+		register_para(para_d);
+		register_para(para_e);
 
 		// start a new session to activate, 3 validators for 3 cores.
 		run_to_block(1, |number| match number {
@@ -639,91 +360,69 @@ fn schedule_clears_availability_cores() {
 			_ => None,
 		});
 
-		run_to_block(2, |_| None);
-
-		assert_eq!(scheduler::ClaimQueue::<Test>::get().len(), 3);
-
-		// cores 0, 1, and 2 should be occupied. mark them as such.
-		Scheduler::occupied(
-			vec![(CoreIndex(0), para_a), (CoreIndex(1), para_b), (CoreIndex(2), para_c)]
-				.into_iter()
-				.collect(),
-		);
+		// add a couple of para claims now that paras are live
+		MockAssigner::add_test_assignment(assignment_a.clone());
+		MockAssigner::add_test_assignment(assignment_c.clone());
 
-		{
-			let cores = AvailabilityCores::<Test>::get();
+		run_to_block(2, |_| None);
 
-			assert_eq!(cores[0].is_free(), false);
-			assert_eq!(cores[1].is_free(), false);
-			assert_eq!(cores[2].is_free(), false);
+		Scheduler::advance_claim_queue(&Default::default());
 
-			// All `core_queue`s should be empty
-			scheduler::ClaimQueue::<Test>::get()
-				.iter()
-				.for_each(|(_core_idx, core_queue)| assert!(core_queue.len() == 0))
-		}
+		// Queues of all cores should be empty
+		assert_eq!(Scheduler::claim_queue_len(), 0);
 
-		// Add more assignments
 		MockAssigner::add_test_assignment(assignment_a.clone());
-		MockAssigner::add_test_assignment(assignment_b.clone());
 		MockAssigner::add_test_assignment(assignment_c.clone());
+		MockAssigner::add_test_assignment(assignment_b.clone());
+		MockAssigner::add_test_assignment(assignment_d.clone());
+		MockAssigner::add_test_assignment(assignment_e.clone());
 
 		run_to_block(3, |_| None);
 
-		// now note that cores 0 and 2 were freed.
-		Scheduler::free_cores_and_fill_claim_queue(
-			vec![(CoreIndex(0), FreedReason::Concluded), (CoreIndex(2), FreedReason::Concluded)]
-				.into_iter()
-				.collect::<Vec<_>>(),
-			3,
-		);
+		{
+			let scheduled: BTreeMap<_, _> = next_assignments().collect();
+
+			assert_eq!(scheduled.len(), 3);
+			assert_eq!(scheduled.get(&CoreIndex(0)).unwrap(), &Assignment::Bulk(para_a));
+			assert_eq!(scheduled.get(&CoreIndex(1)).unwrap(), &Assignment::Bulk(para_c));
+			assert_eq!(scheduled.get(&CoreIndex(2)).unwrap(), &Assignment::Bulk(para_b));
+		}
+
+		// now advance the claim queue for every core except core 2.
+		Scheduler::advance_claim_queue(&std::iter::once(CoreIndex(2)).collect());
 
 		{
-			let claimqueue = ClaimQueue::<Test>::get();
-			let claimqueue_0 = claimqueue.get(&CoreIndex(0)).unwrap().clone();
-			let claimqueue_2 = claimqueue.get(&CoreIndex(2)).unwrap().clone();
-			let entry_ttl = 8;
-			assert_eq!(claimqueue_0.len(), 1);
-			assert_eq!(claimqueue_2.len(), 1);
-			let queue_0_expectation: VecDeque<ParasEntryType<Test>> =
-				vec![ParasEntry::new(assignment_a, entry_ttl as u32)].into_iter().collect();
-			let queue_2_expectation: VecDeque<ParasEntryType<Test>> =
-				vec![ParasEntry::new(assignment_c, entry_ttl as u32)].into_iter().collect();
-			assert_eq!(claimqueue_0, queue_0_expectation);
-			assert_eq!(claimqueue_2, queue_2_expectation);
-
-			// The freed cores should be `Free` in `AvailabilityCores`.
-			let cores = AvailabilityCores::<Test>::get();
-			assert!(cores[0].is_free());
-			assert!(cores[2].is_free());
+			let scheduled: BTreeMap<_, _> = next_assignments().collect();
+
+			// Cores 0 and 1 advanced to their next assignments; core 2 was left untouched.
+			assert_eq!(scheduled.len(), 3);
+			assert_eq!(scheduled.get(&CoreIndex(0)).unwrap(), &Assignment::Bulk(para_d));
+			assert_eq!(scheduled.get(&CoreIndex(1)).unwrap(), &Assignment::Bulk(para_e));
+			assert_eq!(scheduled.get(&CoreIndex(2)).unwrap(), &Assignment::Bulk(para_b));
 		}
 	});
 }
 
 #[test]
 fn schedule_rotates_groups() {
+	let on_demand_cores = 2;
 	let config = {
 		let mut config = default_config();
 		config.scheduler_params.lookahead = 1;
+		config.scheduler_params.num_cores = on_demand_cores;
 		config
 	};
 
 	let rotation_frequency = config.scheduler_params.group_rotation_frequency;
-	let on_demand_cores = 2;
 
 	let genesis_config = genesis_config(&config);
 
 	let para_a = ParaId::from(1_u32);
 	let para_b = ParaId::from(2_u32);
 
-	let assignment_a = Assignment::Bulk(para_a);
-	let assignment_b = Assignment::Bulk(para_b);
-
 	new_test_ext(genesis_config).execute_with(|| {
-		MockAssigner::set_core_count(on_demand_cores);
-
-		schedule_blank_para(para_a);
-		schedule_blank_para(para_b);
+		register_para(para_a);
+		register_para(para_b);
 
 		// start a new session to activate, 2 validators for 2 cores.
 		run_to_block(1, |number| match number {
@@ -741,15 +440,10 @@ fn schedule_rotates_groups() {
 		let session_start_block = scheduler::SessionStartBlock::<Test>::get();
 		assert_eq!(session_start_block, 1);
 
-		MockAssigner::add_test_assignment(assignment_a.clone());
-		MockAssigner::add_test_assignment(assignment_b.clone());
-
 		let mut now = 2;
 		run_to_block(now, |_| None);
 
 		let assert_groups_rotated = |rotations: u32, now: &BlockNumberFor<Test>| {
-			let scheduled: BTreeMap<_, _> = Scheduler::scheduled_paras().collect();
-			assert_eq!(scheduled.len(), 2);
 			assert_eq!(
 				Scheduler::group_assigned_to_core(CoreIndex(0), *now).unwrap(),
 				GroupIndex((0u32 + rotations) % on_demand_cores)
@@ -764,7 +458,7 @@ fn schedule_rotates_groups() {
 
 		// one block before first rotation.
 		now = rotation_frequency;
-		run_to_block(rotation_frequency, |_| None);
+		run_to_block(now, |_| None);
 
 		assert_groups_rotated(0, &now);
 
@@ -785,134 +479,6 @@ fn schedule_rotates_groups() {
 	});
 }
 
-#[test]
-fn on_demand_claims_are_pruned_after_timing_out() {
-	let max_timeouts = 20;
-	let mut config = default_config();
-	config.scheduler_params.lookahead = 1;
-	// Need more timeouts for this test
-	config.scheduler_params.max_availability_timeouts = max_timeouts;
-	config.scheduler_params.ttl = BlockNumber::from(5u32);
-	let genesis_config = genesis_config(&config);
-
-	let para_a = ParaId::from(1_u32);
-
-	let assignment_a = Assignment::Bulk(para_a);
-
-	new_test_ext(genesis_config).execute_with(|| {
-		MockAssigner::set_core_count(2);
-		schedule_blank_para(para_a);
-
-		// #1
-		let mut now = 1;
-		run_to_block(now, |number| match number {
-			1 => Some(SessionChangeNotification {
-				new_config: default_config(),
-				validators: vec![
-					ValidatorId::from(Sr25519Keyring::Alice.public()),
-					ValidatorId::from(Sr25519Keyring::Eve.public()),
-				],
-				..Default::default()
-			}),
-			_ => None,
-		});
-
-		MockAssigner::add_test_assignment(assignment_a.clone());
-
-		// #2
-		now += 1;
-		run_to_block(now, |_| None);
-		assert_eq!(scheduler::ClaimQueue::<Test>::get().len(), 1);
-		// ParaId a is in the claimqueue.
-		assert!(claimqueue_contains_para_ids::<Test>(vec![para_a]));
-
-		Scheduler::occupied(vec![(CoreIndex(0), para_a)].into_iter().collect());
-		// ParaId a is no longer in the claimqueue.
-		assert!(!claimqueue_contains_para_ids::<Test>(vec![para_a]));
-		// It is in availability cores.
-		assert!(availability_cores_contains_para_ids::<Test>(vec![para_a]));
-
-		// #3
-		now += 1;
-		// Run to block #n over the max_retries value.
-		// In this case, both validator groups with time out on availability and
-		// the assignment will be dropped.
-		for n in now..=(now + max_timeouts + 1) {
-			// #n
-			run_to_block(n, |_| None);
-			// Time out on core 0.
-			let just_updated: BTreeMap<CoreIndex, FreedReason> = vec![
-				(CoreIndex(0), FreedReason::TimedOut), // should go back on queue.
-			]
-			.into_iter()
-			.collect();
-			Scheduler::free_cores_and_fill_claim_queue(just_updated, now);
-
-			// ParaId a exists in the claim queue until max_retries is reached.
-			if n < max_timeouts + now {
-				assert!(claimqueue_contains_para_ids::<Test>(vec![para_a]));
-			} else {
-				assert!(!claimqueue_contains_para_ids::<Test>(vec![para_a]));
-			}
-
-			let core_assignments = Scheduler::scheduled_paras().collect();
-			Scheduler::occupied(core_assignments);
-		}
-
-		// ParaId a does not exist in the claimqueue/availability_cores after
-		// threshold has been reached.
-		assert!(!claimqueue_contains_para_ids::<Test>(vec![para_a]));
-		assert!(!availability_cores_contains_para_ids::<Test>(vec![para_a]));
-
-		// #25
-		now += max_timeouts + 2;
-
-		// Add assignment back to the mix.
-		MockAssigner::add_test_assignment(assignment_a.clone());
-		run_to_block(now, |_| None);
-
-		assert!(claimqueue_contains_para_ids::<Test>(vec![para_a]));
-
-		// #26
-		now += 1;
-		// Run to block #n but this time have group 1 conclude the availability.
-		for n in now..=(now + max_timeouts + 1) {
-			// #n
-			run_to_block(n, |_| None);
-			// Time out core 0 if group 0 is assigned to it, if group 1 is assigned, conclude.
-			let mut just_updated: BTreeMap<CoreIndex, FreedReason> = BTreeMap::new();
-			if let Some(group) = Scheduler::group_assigned_to_core(CoreIndex(0), n) {
-				match group {
-					GroupIndex(0) => {
-						just_updated.insert(CoreIndex(0), FreedReason::TimedOut); // should go back on queue.
-					},
-					GroupIndex(1) => {
-						just_updated.insert(CoreIndex(0), FreedReason::Concluded);
-					},
-					_ => panic!("Should only have 2 groups here"),
-				}
-			}
-
-			Scheduler::free_cores_and_fill_claim_queue(just_updated, now);
-
-			// ParaId a exists in the claim queue until groups are rotated.
-			if n < 31 {
-				assert!(claimqueue_contains_para_ids::<Test>(vec![para_a]));
-			} else {
-				assert!(!claimqueue_contains_para_ids::<Test>(vec![para_a]));
-			}
-
-			let core_assignments = Scheduler::scheduled_paras().collect();
-			Scheduler::occupied(core_assignments);
-		}
-
-		// ParaId a does not exist in the claimqueue/availability_cores after
-		// being concluded
-		assert!(!claimqueue_contains_para_ids::<Test>(vec![para_a]));
-		assert!(!availability_cores_contains_para_ids::<Test>(vec![para_a]));
-	});
-}
-
 #[test]
 fn availability_predicate_works() {
 	let genesis_config = genesis_config(&default_config());
@@ -948,20 +514,21 @@ fn availability_predicate_works() {
 
 #[test]
 fn next_up_on_available_uses_next_scheduled_or_none() {
-	let genesis_config = genesis_config(&default_config());
+	let mut config = default_config();
+	config.scheduler_params.num_cores = 1;
+	let genesis_config = genesis_config(&config);
 
 	let para_a = ParaId::from(1_u32);
 	let para_b = ParaId::from(2_u32);
 
 	new_test_ext(genesis_config).execute_with(|| {
-		MockAssigner::set_core_count(1);
-		schedule_blank_para(para_a);
-		schedule_blank_para(para_b);
+		register_para(para_a);
+		register_para(para_b);
 
 		// start a new session to activate, 2 validators for 2 cores.
 		run_to_block(1, |number| match number {
 			1 => Some(SessionChangeNotification {
-				new_config: default_config(),
+				new_config: config.clone(),
 				validators: vec![
 					ValidatorId::from(Sr25519Keyring::Alice.public()),
 					ValidatorId::from(Sr25519Keyring::Eve.public()),
@@ -971,69 +538,57 @@ fn next_up_on_available_uses_next_scheduled_or_none() {
 			_ => None,
 		});
 
-		let entry_a = ParasEntry {
-			assignment: Assignment::Bulk(para_a),
-			availability_timeouts: 0 as u32,
-			ttl: 5 as u32,
-		};
-		let entry_b = ParasEntry {
-			assignment: Assignment::Bulk(para_b),
-			availability_timeouts: 0 as u32,
-			ttl: 5 as u32,
-		};
-
-		Scheduler::add_to_claim_queue(CoreIndex(0), entry_a.clone());
+		MockAssigner::add_test_assignment(Assignment::Bulk(para_a));
 
 		run_to_block(2, |_| None);
 
 		{
-			assert_eq!(Scheduler::claim_queue_len(), 1);
-			assert_eq!(scheduler::AvailabilityCores::<Test>::get().len(), 1);
-
-			let mut map = BTreeMap::new();
-			map.insert(CoreIndex(0), para_a);
-			Scheduler::occupied(map);
+			// Two assignments for A on core 0, because the claim queue used to be empty.
+			assert_eq!(Scheduler::claim_queue_len(), 2);
 
-			let cores = scheduler::AvailabilityCores::<Test>::get();
-			match &cores[0] {
-				CoreOccupied::Paras(entry) => assert_eq!(entry, &entry_a),
-				_ => panic!("There should only be one test assigner core"),
-			}
-
-			assert!(Scheduler::next_up_on_available(CoreIndex(0)).is_none());
+			assert!(Scheduler::next_up_on_available(CoreIndex(1)).is_none());
 
-			Scheduler::add_to_claim_queue(CoreIndex(0), entry_b);
+			assert_eq!(
+				Scheduler::next_up_on_available(CoreIndex(0)).unwrap(),
+				ScheduledCore { para_id: para_a, collator: None }
+			);
 
+			Scheduler::advance_claim_queue(&Default::default());
 			assert_eq!(
 				Scheduler::next_up_on_available(CoreIndex(0)).unwrap(),
-				ScheduledCore { para_id: para_b, collator: None }
+				ScheduledCore { para_id: para_a, collator: None }
 			);
+
+			Scheduler::advance_claim_queue(&Default::default());
+			assert!(Scheduler::next_up_on_available(CoreIndex(0)).is_none());
 		}
 	});
 }
 
 #[test]
-fn next_up_on_time_out_reuses_claim_if_nothing_queued() {
-	let genesis_config = genesis_config(&default_config());
+fn session_change_increasing_number_of_cores() {
+	let mut config = default_config();
+	config.scheduler_params.num_cores = 2;
+	let genesis_config = genesis_config(&config);
 
-	let para_a = ParaId::from(1_u32);
-	let para_b = ParaId::from(2_u32);
+	let para_a = ParaId::from(3_u32);
+	let para_b = ParaId::from(4_u32);
 
 	let assignment_a = Assignment::Bulk(para_a);
 	let assignment_b = Assignment::Bulk(para_b);
 
 	new_test_ext(genesis_config).execute_with(|| {
-		MockAssigner::set_core_count(1);
-		schedule_blank_para(para_a);
-		schedule_blank_para(para_b);
+		// Add 2 paras
+		register_para(para_a);
+		register_para(para_b);
 
 		// start a new session to activate, 2 validators for 2 cores.
 		run_to_block(1, |number| match number {
 			1 => Some(SessionChangeNotification {
-				new_config: default_config(),
+				new_config: config.clone(),
 				validators: vec![
 					ValidatorId::from(Sr25519Keyring::Alice.public()),
-					ValidatorId::from(Sr25519Keyring::Eve.public()),
+					ValidatorId::from(Sr25519Keyring::Bob.public()),
 				],
 				..Default::default()
 			}),
@@ -1041,193 +596,236 @@ fn next_up_on_time_out_reuses_claim_if_nothing_queued() {
 		});
 
 		MockAssigner::add_test_assignment(assignment_a.clone());
+		MockAssigner::add_test_assignment(assignment_b.clone());
 
+		// This will call advance_claim_queue
 		run_to_block(2, |_| None);
 
 		{
-			assert_eq!(scheduler::ClaimQueue::<Test>::get().len(), 1);
-			assert_eq!(scheduler::AvailabilityCores::<Test>::get().len(), 1);
-
-			let mut map = BTreeMap::new();
-			map.insert(CoreIndex(0), para_a);
-			Scheduler::occupied(map);
-
-			let cores = scheduler::AvailabilityCores::<Test>::get();
-			match cores.get(0).unwrap() {
-				CoreOccupied::Paras(entry) => {
-					assert_eq!(entry.assignment, assignment_a.clone());
-				},
-				_ => panic!("There should only be a single test assigner core"),
-			}
-
-			// There's nothing more to pop for core 0 from the assignment provider.
-			assert!(MockAssigner::pop_assignment_for_core(CoreIndex(0)).is_none());
+			let mut claim_queue = scheduler::ClaimQueue::<Test>::get();
+			assert_eq!(Scheduler::claim_queue_len(), 4);
 
 			assert_eq!(
-				Scheduler::next_up_on_time_out(CoreIndex(0)).unwrap(),
-				ScheduledCore { para_id: para_a, collator: None }
+				claim_queue.remove(&CoreIndex(0)).unwrap(),
+				[assignment_a.clone(), assignment_a.clone()]
+					.into_iter()
+					.collect::<VecDeque<_>>()
+			);
+			assert_eq!(
+				claim_queue.remove(&CoreIndex(1)).unwrap(),
+				[assignment_b.clone(), assignment_b.clone()]
+					.into_iter()
+					.collect::<VecDeque<_>>()
 			);
+		}
+
+		// Increase number of cores to 4.
+		let old_config = config;
+		let mut new_config = old_config.clone();
+		new_config.scheduler_params.num_cores = 4;
 
-			MockAssigner::add_test_assignment(assignment_b.clone());
+		// add another assignment for para b.
+		MockAssigner::add_test_assignment(assignment_b.clone());
+
+		run_to_block(3, |number| match number {
+			3 => Some(SessionChangeNotification {
+				new_config: new_config.clone(),
+				prev_config: old_config.clone(),
+				validators: vec![
+					ValidatorId::from(Sr25519Keyring::Alice.public()),
+					ValidatorId::from(Sr25519Keyring::Bob.public()),
+					ValidatorId::from(Sr25519Keyring::Charlie.public()),
+					ValidatorId::from(Sr25519Keyring::Dave.public()),
+				],
+				..Default::default()
+			}),
+			_ => None,
+		});
 
-			// Pop assignment_b into the claimqueue
-			Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), 2);
+		{
+			let mut claim_queue = scheduler::ClaimQueue::<Test>::get();
+			assert_eq!(Scheduler::claim_queue_len(), 3);
 
-			//// Now that there is an earlier next-up, we use that.
 			assert_eq!(
-				Scheduler::next_up_on_available(CoreIndex(0)).unwrap(),
-				ScheduledCore { para_id: para_b, collator: None }
+				claim_queue.remove(&CoreIndex(0)).unwrap(),
+				[assignment_a].into_iter().collect::<VecDeque<_>>()
+			);
+			assert_eq!(
+				claim_queue.remove(&CoreIndex(1)).unwrap(),
+				[assignment_b.clone()].into_iter().collect::<VecDeque<_>>()
+			);
+			assert_eq!(
+				claim_queue.remove(&CoreIndex(2)).unwrap(),
+				[assignment_b.clone()].into_iter().collect::<VecDeque<_>>()
 			);
 		}
 	});
 }
 
 #[test]
-fn session_change_requires_reschedule_dropping_removed_paras() {
+fn session_change_decreasing_number_of_cores() {
 	let mut config = default_config();
-	config.scheduler_params.lookahead = 1;
+	config.scheduler_params.num_cores = 3;
 	let genesis_config = genesis_config(&config);
 
-	let para_a = ParaId::from(1_u32);
-	let para_b = ParaId::from(2_u32);
+	let para_a = ParaId::from(3_u32);
+	let para_b = ParaId::from(4_u32);
 
 	let assignment_a = Assignment::Bulk(para_a);
 	let assignment_b = Assignment::Bulk(para_b);
 
 	new_test_ext(genesis_config).execute_with(|| {
-		// Setting explicit core count
-		MockAssigner::set_core_count(5);
-		let coretime_ttl = configuration::ActiveConfig::<Test>::get().scheduler_params.ttl;
-
-		schedule_blank_para(para_a);
-		schedule_blank_para(para_b);
-
-		// Add assignments
-		MockAssigner::add_test_assignment(assignment_a.clone());
-		MockAssigner::add_test_assignment(assignment_b.clone());
+		// Add 2 paras
+		register_para(para_a);
+		register_para(para_b);
 
+		// start a new session to activate, 2 validators for 3 cores.
 		run_to_block(1, |number| match number {
 			1 => Some(SessionChangeNotification {
-				new_config: default_config(),
+				new_config: config.clone(),
 				validators: vec![
 					ValidatorId::from(Sr25519Keyring::Alice.public()),
 					ValidatorId::from(Sr25519Keyring::Bob.public()),
-					ValidatorId::from(Sr25519Keyring::Charlie.public()),
-					ValidatorId::from(Sr25519Keyring::Dave.public()),
-					ValidatorId::from(Sr25519Keyring::Eve.public()),
-					ValidatorId::from(Sr25519Keyring::Ferdie.public()),
-					ValidatorId::from(Sr25519Keyring::One.public()),
 				],
-				random_seed: [99; 32],
 				..Default::default()
 			}),
 			_ => None,
 		});
 
-		assert_eq!(scheduler::ClaimQueue::<Test>::get().len(), 2);
+		scheduler::Pallet::<Test>::set_claim_queue(BTreeMap::from([
+			(CoreIndex::from(0), VecDeque::from([assignment_a.clone()])),
+			// Leave a hole for core 1.
+			(CoreIndex::from(2), VecDeque::from([assignment_b.clone(), assignment_b.clone()])),
+		]));
 
-		let groups = ValidatorGroups::<Test>::get();
-		assert_eq!(groups.len(), 5);
+		// Decrease number of cores to 1.
+		let old_config = config;
+		let mut new_config = old_config.clone();
+		new_config.scheduler_params.num_cores = 1;
 
-		assert_ok!(Paras::schedule_para_cleanup(para_b));
+		// Session change.
+		// Assignment A had its shot already so will be dropped for good.
+		// The two assignments of B will be pushed back to the assignment provider.
+		run_to_block(3, |number| match number {
+			3 => Some(SessionChangeNotification {
+				new_config: new_config.clone(),
+				prev_config: old_config.clone(),
+				validators: vec![ValidatorId::from(Sr25519Keyring::Alice.public())],
+				..Default::default()
+			}),
+			_ => None,
+		});
 
-		// Add assignment
-		MockAssigner::add_test_assignment(assignment_a.clone());
+		let mut claim_queue = scheduler::ClaimQueue::<Test>::get();
+		assert_eq!(Scheduler::claim_queue_len(), 1);
 
-		run_to_end_of_block(2, |number| match number {
-			2 => Some(SessionChangeNotification {
-				new_config: default_config(),
+		// There's only one assignment for B because run_to_block also calls advance_claim_queue at
+		// the end.
+		assert_eq!(
+			claim_queue.remove(&CoreIndex(0)).unwrap(),
+			[assignment_b.clone()].into_iter().collect::<VecDeque<_>>()
+		);
+
+		// No more assignments now.
+		Scheduler::advance_claim_queue(&Default::default());
+		assert_eq!(Scheduler::claim_queue_len(), 0);
+	});
+}
+
+#[test]
+fn session_change_increasing_lookahead() {
+	let mut config = default_config();
+	config.scheduler_params.num_cores = 2;
+	config.scheduler_params.lookahead = 2;
+	let genesis_config = genesis_config(&config);
+
+	let para_a = ParaId::from(3_u32);
+	let para_b = ParaId::from(4_u32);
+
+	let assignment_a = Assignment::Bulk(para_a);
+	let assignment_b = Assignment::Bulk(para_b);
+
+	new_test_ext(genesis_config).execute_with(|| {
+		// Add 2 paras
+		register_para(para_a);
+		register_para(para_b);
+
+		// start a new session to activate, 2 validators for 2 cores.
+		run_to_block(1, |number| match number {
+			1 => Some(SessionChangeNotification {
+				new_config: config.clone(),
 				validators: vec![
 					ValidatorId::from(Sr25519Keyring::Alice.public()),
 					ValidatorId::from(Sr25519Keyring::Bob.public()),
-					ValidatorId::from(Sr25519Keyring::Charlie.public()),
-					ValidatorId::from(Sr25519Keyring::Dave.public()),
-					ValidatorId::from(Sr25519Keyring::Eve.public()),
-					ValidatorId::from(Sr25519Keyring::Ferdie.public()),
-					ValidatorId::from(Sr25519Keyring::One.public()),
 				],
-				random_seed: [99; 32],
 				..Default::default()
 			}),
 			_ => None,
 		});
 
-		Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), 3);
+		MockAssigner::add_test_assignment(assignment_a.clone());
+		MockAssigner::add_test_assignment(assignment_a.clone());
+		MockAssigner::add_test_assignment(assignment_a.clone());
+		MockAssigner::add_test_assignment(assignment_b.clone());
+		MockAssigner::add_test_assignment(assignment_b.clone());
+		MockAssigner::add_test_assignment(assignment_b.clone());
+
+		// Lookahead is currently 2.
 
-		assert_eq!(
-			scheduler::ClaimQueue::<Test>::get(),
-			vec![(
-				CoreIndex(0),
-				vec![ParasEntry::new(
-					Assignment::Bulk(para_a),
-					// At end of block 2
-					coretime_ttl + 2
-				)]
-				.into_iter()
-				.collect()
-			)]
-			.into_iter()
-			.collect()
-		);
+		run_to_block(2, |_| None);
 
-		// Add para back
-		schedule_blank_para(para_b);
+		{
+			let mut claim_queue = scheduler::ClaimQueue::<Test>::get();
+			assert_eq!(Scheduler::claim_queue_len(), 4);
 
-		// Add assignments
-		MockAssigner::add_test_assignment(assignment_a.clone());
-		MockAssigner::add_test_assignment(assignment_b.clone());
+			assert_eq!(
+				claim_queue.remove(&CoreIndex(0)).unwrap(),
+				[assignment_a.clone(), assignment_a.clone()]
+					.into_iter()
+					.collect::<VecDeque<_>>()
+			);
+			assert_eq!(
+				claim_queue.remove(&CoreIndex(1)).unwrap(),
+				[assignment_a.clone(), assignment_a.clone()]
+					.into_iter()
+					.collect::<VecDeque<_>>()
+			);
+		}
+
+		// Increase lookahead to 4.
+		let old_config = config;
+		let mut new_config = old_config.clone();
+		new_config.scheduler_params.lookahead = 4;
 
 		run_to_block(3, |number| match number {
 			3 => Some(SessionChangeNotification {
-				new_config: default_config(),
+				new_config: new_config.clone(),
+				prev_config: old_config.clone(),
 				validators: vec![
 					ValidatorId::from(Sr25519Keyring::Alice.public()),
 					ValidatorId::from(Sr25519Keyring::Bob.public()),
-					ValidatorId::from(Sr25519Keyring::Charlie.public()),
-					ValidatorId::from(Sr25519Keyring::Dave.public()),
-					ValidatorId::from(Sr25519Keyring::Eve.public()),
-					ValidatorId::from(Sr25519Keyring::Ferdie.public()),
-					ValidatorId::from(Sr25519Keyring::One.public()),
 				],
-				random_seed: [99; 32],
 				..Default::default()
 			}),
 			_ => None,
 		});
 
-		assert_eq!(scheduler::ClaimQueue::<Test>::get().len(), 2);
-
-		let groups = ValidatorGroups::<Test>::get();
-		assert_eq!(groups.len(), 5);
-
-		Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), 4);
+		{
+			let mut claim_queue = scheduler::ClaimQueue::<Test>::get();
+			assert_eq!(Scheduler::claim_queue_len(), 6);
 
-		assert_eq!(
-			scheduler::ClaimQueue::<Test>::get(),
-			vec![
-				(
-					CoreIndex(0),
-					vec![ParasEntry::new(
-						Assignment::Bulk(para_a),
-						// At block 3
-						coretime_ttl + 3
-					)]
+			assert_eq!(
+				claim_queue.remove(&CoreIndex(0)).unwrap(),
+				[assignment_a.clone(), assignment_a.clone(), assignment_b.clone()]
 					.into_iter()
-					.collect()
-				),
-				(
-					CoreIndex(1),
-					vec![ParasEntry::new(
-						Assignment::Bulk(para_b),
-						// At block 3
-						coretime_ttl + 3
-					)]
+					.collect::<VecDeque<_>>()
+			);
+			assert_eq!(
+				claim_queue.remove(&CoreIndex(1)).unwrap(),
+				[assignment_a.clone(), assignment_b.clone(), assignment_b.clone()]
 					.into_iter()
-					.collect()
-				),
-			]
-			.into_iter()
-			.collect()
-		);
+					.collect::<VecDeque<_>>()
+			);
+		}
 	});
 }
diff --git a/polkadot/runtime/parachains/src/session_info.rs b/polkadot/runtime/parachains/src/session_info.rs
index ea05c1aacaa946aa50bcb68e69930acbd60d79eb..0ec01755095bc140f23870b0a5b5fc0ad4d35cd9 100644
--- a/polkadot/runtime/parachains/src/session_info.rs
+++ b/polkadot/runtime/parachains/src/session_info.rs
@@ -135,8 +135,8 @@ impl<T: Config> Pallet<T> {
 		let assignment_keys = AssignmentKeysUnsafe::<T>::get();
 		let active_set = shared::ActiveValidatorIndices::<T>::get();
 
-		let validator_groups = scheduler::ValidatorGroups::<T>::get().into();
-		let n_cores = scheduler::AvailabilityCores::<T>::get().len() as u32;
+		let validator_groups = scheduler::ValidatorGroups::<T>::get();
+		let n_cores = validator_groups.len() as u32;
 		let zeroth_delay_tranche_width = config.zeroth_delay_tranche_width;
 		let relay_vrf_modulo_samples = config.relay_vrf_modulo_samples;
 		let n_delay_tranches = config.n_delay_tranches;
@@ -177,7 +177,7 @@ impl<T: Config> Pallet<T> {
 			validators, // these are from the notification and are thus already correct.
 			discovery_keys: take_active_subset_and_inactive(&active_set, &discovery_keys),
 			assignment_keys: take_active_subset(&active_set, &assignment_keys),
-			validator_groups,
+			validator_groups: validator_groups.into(),
 			n_cores,
 			zeroth_delay_tranche_width,
 			relay_vrf_modulo_samples,
diff --git a/polkadot/runtime/parachains/src/shared.rs b/polkadot/runtime/parachains/src/shared.rs
index f582bf0d90b5e400ac6ff558653bc69cd68930df..473c1aba7a066d198f7bcdad8ad920cf7cf955e8 100644
--- a/polkadot/runtime/parachains/src/shared.rs
+++ b/polkadot/runtime/parachains/src/shared.rs
@@ -80,6 +80,7 @@ impl<Hash: PartialEq + Copy, BlockNumber: AtLeast32BitUnsigned + Copy>
 	/// Add a new relay-parent to the allowed relay parents, along with info about the header.
 	/// Provide a maximum ancestry length for the buffer, which will cause old relay-parents to be
 	/// pruned.
+	/// If the relay parent hash is already present, do nothing.
 	pub(crate) fn update(
 		&mut self,
 		relay_parent: Hash,
@@ -88,6 +89,11 @@ impl<Hash: PartialEq + Copy, BlockNumber: AtLeast32BitUnsigned + Copy>
 		number: BlockNumber,
 		max_ancestry_len: u32,
 	) {
+		if self.buffer.iter().any(|info| info.relay_parent == relay_parent) {
+			// Already present.
+			return
+		}
+
 		let claim_queue = transpose_claim_queue(claim_queue);
 
 		// + 1 for the most recent block, which is always allowed.
diff --git a/polkadot/runtime/parachains/src/shared/tests.rs b/polkadot/runtime/parachains/src/shared/tests.rs
index 6da84e254f051f393612adb2f7f6a5e9a60cfc7c..f7ea5148ce33417740f09b9e39b64f94b0a2c29b 100644
--- a/polkadot/runtime/parachains/src/shared/tests.rs
+++ b/polkadot/runtime/parachains/src/shared/tests.rs
@@ -43,7 +43,13 @@ fn tracker_earliest_block_number() {
 	let max_ancestry_len = 4;
 	let now = 4;
 	for i in 1..now {
-		tracker.update(Hash::zero(), Hash::zero(), Default::default(), i, max_ancestry_len);
+		tracker.update(
+			Hash::from([i as u8; 32]),
+			Hash::zero(),
+			Default::default(),
+			i,
+			max_ancestry_len,
+		);
 		assert_eq!(tracker.hypothetical_earliest_block_number(i + 1, max_ancestry_len), 0);
 	}
 
@@ -53,7 +59,7 @@ fn tracker_earliest_block_number() {
 }
 
 #[test]
-fn tracker_claim_queue_remap() {
+fn tracker_claim_queue_transpose() {
 	let mut tracker = AllowedRelayParentsTracker::<Hash, u32>::default();
 
 	let mut claim_queue = BTreeMap::new();
@@ -120,6 +126,14 @@ fn tracker_acquire_info() {
 		Some((s, b)) if s.state_root == state_root && b == 0
 	);
 
+	// Try to push a duplicate. Should be ignored.
+	tracker.update(relay_parent, Hash::repeat_byte(13), Default::default(), 0, max_ancestry_len);
+	assert_eq!(tracker.buffer.len(), 1);
+	assert_matches!(
+		tracker.acquire_info(relay_parent, None),
+		Some((s, b)) if s.state_root == state_root && b == 0
+	);
+
 	let (relay_parent, state_root) = blocks[1];
 	tracker.update(relay_parent, state_root, Default::default(), 1u32, max_ancestry_len);
 	let (relay_parent, state_root) = blocks[2];
diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index 6266febaa0b22786e75780d43c70e359406f130a..e94b6666ed072cc8bc9996eb96bf2d2c4b2ae816 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -1710,7 +1710,8 @@ pub mod migrations {
         // permanent
         pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
         parachains_inclusion::migration::MigrateToV1<Runtime>,
         parachains_shared::migration::MigrateToV1<Runtime>,
+        parachains_scheduler::migration::MigrateV2ToV3<Runtime>,
     );
 }
 
diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras_inherent.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras_inherent.rs
index b7b3d12d4d92c301c4b4a8754890676d3ca291d6..71a0bb6fc7b2dae1ce238f3a94a77e54058abca1 100644
--- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras_inherent.rs
+++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras_inherent.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `polkadot_runtime_parachains::paras_inherent`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-08-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-10-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-augrssgt-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
 
 // Executed Command:
@@ -54,10 +54,12 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
 	/// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1)
 	/// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:0)
+	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
 	/// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1)
-	/// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
+	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0)
 	/// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0)
@@ -70,23 +72,21 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0)
 	/// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
-	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
-	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0)
+	/// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0)
 	/// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Session::DisabledValidators` (r:1 w:0)
 	/// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	fn enter_empty() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `8967`
-		//  Estimated: `12432`
-		// Minimum execution time: 144_751_000 picoseconds.
-		Weight::from_parts(153_966_000, 0)
-			.saturating_add(Weight::from_parts(0, 12432))
+		//  Measured:  `42760`
+		//  Estimated: `46225`
+		// Minimum execution time: 228_252_000 picoseconds.
+		Weight::from_parts(234_368_000, 0)
+			.saturating_add(Weight::from_parts(0, 46225))
 			.saturating_add(T::DbWeight::get().reads(15))
-			.saturating_add(T::DbWeight::get().writes(5))
+			.saturating_add(T::DbWeight::get().writes(3))
 	}
 	/// Storage: `ParaInherent::Included` (r:1 w:1)
 	/// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
@@ -94,10 +94,12 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
 	/// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1)
 	/// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
+	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
 	/// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1)
-	/// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
+	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0)
 	/// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0)
@@ -128,16 +130,14 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0)
 	/// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
-	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
-	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
-	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0)
+	/// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0)
 	/// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Session::DisabledValidators` (r:1 w:0)
 	/// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
+	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1)
 	/// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Paras::Heads` (r:0 w:1)
@@ -146,19 +146,18 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Paras::MostRecentContext` (r:0 w:1)
 	/// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// The range of component `v` is `[10, 200]`.
+	/// The range of component `v` is `[400, 1024]`.
 	fn enter_variable_disputes(v: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `67786`
-		//  Estimated: `73726 + v * (23 ±0)`
-		// Minimum execution time: 972_311_000 picoseconds.
-		Weight::from_parts(645_559_304, 0)
-			.saturating_add(Weight::from_parts(0, 73726))
-			// Standard Error: 53_320
-			.saturating_add(Weight::from_parts(41_795_493, 0).saturating_mul(v.into()))
-			.saturating_add(T::DbWeight::get().reads(25))
-			.saturating_add(T::DbWeight::get().writes(15))
-			.saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into()))
+		//  Measured:  `203155`
+		//  Estimated: `209095`
+		// Minimum execution time: 17_510_015_000 picoseconds.
+		Weight::from_parts(948_178_084, 0)
+			.saturating_add(Weight::from_parts(0, 209095))
+			// Standard Error: 16_345
+			.saturating_add(Weight::from_parts(41_627_958, 0).saturating_mul(v.into()))
+			.saturating_add(T::DbWeight::get().reads(26))
+			.saturating_add(T::DbWeight::get().writes(16))
 	}
 	/// Storage: `ParaInherent::Included` (r:1 w:1)
 	/// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
@@ -166,10 +165,12 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
 	/// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1)
 	/// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:0)
+	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
 	/// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1)
-	/// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
+	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0)
 	/// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0)
@@ -182,25 +183,21 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0)
 	/// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
-	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
-	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
-	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0)
+	/// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0)
 	/// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Session::DisabledValidators` (r:1 w:0)
 	/// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	fn enter_bitfields() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `42374`
-		//  Estimated: `48314`
-		// Minimum execution time: 361_262_000 picoseconds.
-		Weight::from_parts(370_617_000, 0)
-			.saturating_add(Weight::from_parts(0, 48314))
-			.saturating_add(T::DbWeight::get().reads(17))
-			.saturating_add(T::DbWeight::get().writes(7))
+		//  Measured:  `76066`
+		//  Estimated: `82006`
+		// Minimum execution time: 501_266_000 picoseconds.
+		Weight::from_parts(517_989_000, 0)
+			.saturating_add(Weight::from_parts(0, 82006))
+			.saturating_add(T::DbWeight::get().reads(16))
+			.saturating_add(T::DbWeight::get().writes(4))
 	}
 	/// Storage: `ParaInherent::Included` (r:1 w:1)
 	/// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
@@ -208,10 +205,12 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
 	/// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1)
 	/// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
+	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
 	/// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1)
-	/// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
+	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0)
 	/// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0)
@@ -236,12 +235,8 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0)
 	/// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
-	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
-	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
-	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0)
+	/// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Paras::CurrentCodeHash` (r:1 w:0)
 	/// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Paras::ParaLifecycles` (r:1 w:0)
@@ -252,6 +247,8 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Session::DisabledValidators` (r:1 w:0)
 	/// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
+	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasDisputes::Included` (r:0 w:1)
 	/// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1)
@@ -262,18 +259,18 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Paras::MostRecentContext` (r:0 w:1)
 	/// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// The range of component `v` is `[101, 200]`.
+	/// The range of component `v` is `[2, 3]`.
 	fn enter_backed_candidates_variable(v: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `42830`
-		//  Estimated: `48770`
-		// Minimum execution time: 1_322_051_000 picoseconds.
-		Weight::from_parts(1_379_846_608, 0)
-			.saturating_add(Weight::from_parts(0, 48770))
-			// Standard Error: 19_959
-			.saturating_add(Weight::from_parts(24_630, 0).saturating_mul(v.into()))
+		//  Measured:  `76842`
+		//  Estimated: `82782`
+		// Minimum execution time: 1_861_799_000 picoseconds.
+		Weight::from_parts(1_891_155_030, 0)
+			.saturating_add(Weight::from_parts(0, 82782))
+			// Standard Error: 2_415_944
+			.saturating_add(Weight::from_parts(7_924_189, 0).saturating_mul(v.into()))
 			.saturating_add(T::DbWeight::get().reads(26))
-			.saturating_add(T::DbWeight::get().writes(15))
+			.saturating_add(T::DbWeight::get().writes(14))
 	}
 	/// Storage: `ParaInherent::Included` (r:1 w:1)
 	/// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
@@ -281,10 +278,12 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
 	/// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1)
 	/// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
+	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
 	/// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1)
-	/// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
+	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0)
 	/// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0)
@@ -309,12 +308,8 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0)
 	/// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
-	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
-	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
-	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0)
+	/// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Paras::CurrentCodeHash` (r:1 w:0)
 	/// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Paras::FutureCodeHash` (r:1 w:0)
@@ -329,6 +324,8 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Session::DisabledValidators` (r:1 w:0)
 	/// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
+	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasDisputes::Included` (r:0 w:1)
 	/// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1)
@@ -341,12 +338,12 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn enter_backed_candidate_code_upgrade() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `42843`
-		//  Estimated: `48783`
-		// Minimum execution time: 37_550_515_000 picoseconds.
-		Weight::from_parts(37_886_489_000, 0)
-			.saturating_add(Weight::from_parts(0, 48783))
+		//  Measured:  `76855`
+		//  Estimated: `82795`
+		// Minimum execution time: 37_682_370_000 picoseconds.
+		Weight::from_parts(41_118_445_000, 0)
+			.saturating_add(Weight::from_parts(0, 82795))
 			.saturating_add(T::DbWeight::get().reads(28))
-			.saturating_add(T::DbWeight::get().writes(15))
+			.saturating_add(T::DbWeight::get().writes(14))
 	}
 }
diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs
index 96104ace7d73d8671965bf8852b4e739bf21245a..9e7ee488af72d8b9993a307997066702a6e7591a 100644
--- a/polkadot/runtime/test-runtime/src/lib.rs
+++ b/polkadot/runtime/test-runtime/src/lib.rs
@@ -31,11 +31,11 @@ use codec::Encode;
 use pallet_transaction_payment::FungibleAdapter;
 
 use polkadot_runtime_parachains::{
-	assigner_parachains as parachains_assigner_parachains,
-	configuration as parachains_configuration,
-	configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio, disputes as parachains_disputes,
-	disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp,
-	inclusion as parachains_inclusion, initializer as parachains_initializer,
+	assigner_coretime as parachains_assigner_coretime, configuration as parachains_configuration,
+	configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio, coretime,
+	disputes as parachains_disputes, disputes::slashing as parachains_slashing,
+	dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion,
+	initializer as parachains_initializer, on_demand as parachains_on_demand,
 	origin as parachains_origin, paras as parachains_paras,
 	paras_inherent as parachains_paras_inherent, runtime_api_impl::v11 as runtime_impl,
 	scheduler as parachains_scheduler, session_info as parachains_session_info,
@@ -49,8 +49,10 @@ use frame_election_provider_support::{
 use frame_support::{
 	construct_runtime, derive_impl,
 	genesis_builder_helper::{build_state, get_preset},
+	pallet_prelude::Get,
 	parameter_types,
 	traits::{KeyOwnerProofSystem, WithdrawReasons},
+	PalletId,
 };
 use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId};
 use pallet_session::historical as session_historical;
@@ -92,6 +94,7 @@ use sp_staking::SessionIndex;
 #[cfg(any(feature = "std", test))]
 use sp_version::NativeVersion;
 use sp_version::RuntimeVersion;
+use xcm::v4::{Assets, InteriorLocation, Location, SendError, SendResult, SendXcm, XcmHash};
 
 pub use pallet_balances::Call as BalancesCall;
 #[cfg(feature = "std")]
@@ -559,7 +562,7 @@ impl parachains_initializer::Config for Runtime {
 	type Randomness = pallet_babe::RandomnessFromOneEpochAgo<Runtime>;
 	type ForceOrigin = frame_system::EnsureRoot<AccountId>;
 	type WeightInfo = ();
-	type CoretimeOnNewSession = ();
+	type CoretimeOnNewSession = Coretime;
 }
 
 impl parachains_session_info::Config for Runtime {
@@ -577,15 +580,26 @@ impl parachains_paras::Config for Runtime {
 	type QueueFootprinter = ParaInclusion;
 	type NextSessionRotation = Babe;
 	type OnNewHead = ();
-	type AssignCoretime = ();
+	type AssignCoretime = CoretimeAssignmentProvider;
 }
 
 parameter_types! {
 	pub const BrokerId: u32 = 10u32;
+	pub MaxXcmTransactWeight: Weight = Weight::from_parts(10_000_000, 10_000);
+}
+
+pub struct BrokerPot;
+impl Get<InteriorLocation> for BrokerPot {
+	fn get() -> InteriorLocation {
+		unimplemented!()
+	}
 }
 
 parameter_types! {
 	pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1);
+	// Keep 2 timeslices worth of revenue information.
+	pub const MaxHistoricalRevenue: BlockNumber = 2 * 5;
+	pub const OnDemandPalletId: PalletId = PalletId(*b"py/ondmd");
 }
 
 impl parachains_dmp::Config for Runtime {}
@@ -607,10 +621,48 @@ impl parachains_hrmp::Config for Runtime {
 	type WeightInfo = parachains_hrmp::TestWeightInfo;
 }
 
-impl parachains_assigner_parachains::Config for Runtime {}
+impl parachains_on_demand::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type Currency = Balances;
+	type TrafficDefaultValue = OnDemandTrafficDefaultValue;
+	type WeightInfo = parachains_on_demand::TestWeightInfo;
+	type MaxHistoricalRevenue = MaxHistoricalRevenue;
+	type PalletId = OnDemandPalletId;
+}
+
+impl parachains_assigner_coretime::Config for Runtime {}
 
 impl parachains_scheduler::Config for Runtime {
-	type AssignmentProvider = ParaAssignmentProvider;
+	type AssignmentProvider = CoretimeAssignmentProvider;
+}
+
+pub struct DummyXcmSender;
+impl SendXcm for DummyXcmSender {
+	type Ticket = ();
+	fn validate(
+		_: &mut Option<Location>,
+		_: &mut Option<xcm::v4::Xcm<()>>,
+	) -> SendResult<Self::Ticket> {
+		Ok(((), Assets::new()))
+	}
+
+	/// Actually carry out the delivery operation for a previously validated message sending.
+	fn deliver(_ticket: Self::Ticket) -> Result<XcmHash, SendError> {
+		Ok([0u8; 32])
+	}
+}
+
+impl coretime::Config for Runtime {
+	type RuntimeOrigin = RuntimeOrigin;
+	type RuntimeEvent = RuntimeEvent;
+	type Currency = pallet_balances::Pallet<Runtime>;
+	type BrokerId = BrokerId;
+	type WeightInfo = crate::coretime::TestWeightInfo;
+	type SendXcm = DummyXcmSender;
+	type MaxXcmTransactWeight = MaxXcmTransactWeight;
+	type BrokerPotLocation = BrokerPot;
+	type AssetTransactor = ();
+	type AccountToLocation = ();
 }
 
 impl paras_sudo_wrapper::Config for Runtime {}
@@ -753,7 +805,9 @@ construct_runtime! {
 		Xcm: pallet_xcm,
 		ParasDisputes: parachains_disputes,
 		ParasSlashing: parachains_slashing,
-		ParaAssignmentProvider: parachains_assigner_parachains,
+		OnDemandAssignmentProvider: parachains_on_demand,
+		CoretimeAssignmentProvider: parachains_assigner_coretime,
+		Coretime: coretime,
 
 		Sudo: pallet_sudo,
 
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index b7dae533224c61f9da1ee1863fe5a9eb8408aa50..461be186ee516fdd3aabbe9b574e7739a61f0773 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -1805,6 +1805,7 @@ pub mod migrations {
 			MaxAgentsToMigrate,
 		>,
 		parachains_shared::migration::MigrateToV1<Runtime>,
+		parachains_scheduler::migration::MigrateV2ToV3<Runtime>,
 	);
 }
 
diff --git a/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_paras_inherent.rs b/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_paras_inherent.rs
index 32f6f28f242621f1cf496046fbdf94f3ba7b819c..36aafc1d2f2a7f97becebd367bddffec67dcdf6f 100644
--- a/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_paras_inherent.rs
+++ b/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_paras_inherent.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `polkadot_runtime_parachains::paras_inherent`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-08-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-10-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-dr4vwrkf-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
@@ -54,10 +54,12 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
 	/// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1)
 	/// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:0)
+	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
 	/// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1)
-	/// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
+	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0)
 	/// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0)
@@ -70,23 +72,21 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0)
 	/// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
-	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
-	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0)
+	/// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0)
 	/// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Session::DisabledValidators` (r:1 w:0)
 	/// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	fn enter_empty() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `37553`
-		//  Estimated: `41018`
-		// Minimum execution time: 237_414_000 picoseconds.
-		Weight::from_parts(245_039_000, 0)
-			.saturating_add(Weight::from_parts(0, 41018))
+		//  Measured:  `37559`
+		//  Estimated: `41024`
+		// Minimum execution time: 217_257_000 picoseconds.
+		Weight::from_parts(228_878_000, 0)
+			.saturating_add(Weight::from_parts(0, 41024))
 			.saturating_add(T::DbWeight::get().reads(15))
-			.saturating_add(T::DbWeight::get().writes(5))
+			.saturating_add(T::DbWeight::get().writes(3))
 	}
 	/// Storage: `ParaInherent::Included` (r:1 w:1)
 	/// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
@@ -94,10 +94,12 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
 	/// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1)
 	/// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
+	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
 	/// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1)
-	/// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
+	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0)
 	/// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0)
@@ -134,16 +136,14 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0)
 	/// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
-	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
-	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
-	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0)
+	/// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0)
 	/// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Session::DisabledValidators` (r:1 w:0)
 	/// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
+	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1)
 	/// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Paras::Heads` (r:0 w:1)
@@ -152,19 +152,18 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Paras::MostRecentContext` (r:0 w:1)
 	/// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// The range of component `v` is `[10, 1024]`.
+	/// The range of component `v` is `[400, 1024]`.
 	fn enter_variable_disputes(v: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `199504`
-		//  Estimated: `205444 + v * (5 ±0)`
-		// Minimum execution time: 1_157_489_000 picoseconds.
-		Weight::from_parts(629_243_559, 0)
-			.saturating_add(Weight::from_parts(0, 205444))
-			// Standard Error: 10_997
-			.saturating_add(Weight::from_parts(50_752_930, 0).saturating_mul(v.into()))
-			.saturating_add(T::DbWeight::get().reads(28))
-			.saturating_add(T::DbWeight::get().writes(16))
-			.saturating_add(Weight::from_parts(0, 5).saturating_mul(v.into()))
+		//  Measured:  `117547`
+		//  Estimated: `123487`
+		// Minimum execution time: 21_077_090_000 picoseconds.
+		Weight::from_parts(703_350_265, 0)
+			.saturating_add(Weight::from_parts(0, 123487))
+			// Standard Error: 21_944
+			.saturating_add(Weight::from_parts(51_197_317, 0).saturating_mul(v.into()))
+			.saturating_add(T::DbWeight::get().reads(29))
+			.saturating_add(T::DbWeight::get().writes(17))
 	}
 	/// Storage: `ParaInherent::Included` (r:1 w:1)
 	/// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
@@ -172,10 +171,12 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
 	/// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1)
 	/// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:0)
+	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
 	/// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1)
-	/// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
+	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0)
 	/// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0)
@@ -188,25 +189,21 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0)
 	/// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
-	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
-	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
-	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0)
+	/// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0)
 	/// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Session::DisabledValidators` (r:1 w:0)
 	/// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	fn enter_bitfields() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `75131`
-		//  Estimated: `81071`
-		// Minimum execution time: 466_928_000 picoseconds.
-		Weight::from_parts(494_342_000, 0)
-			.saturating_add(Weight::from_parts(0, 81071))
-			.saturating_add(T::DbWeight::get().reads(17))
-			.saturating_add(T::DbWeight::get().writes(7))
+		//  Measured:  `74967`
+		//  Estimated: `80907`
+		// Minimum execution time: 487_605_000 picoseconds.
+		Weight::from_parts(506_014_000, 0)
+			.saturating_add(Weight::from_parts(0, 80907))
+			.saturating_add(T::DbWeight::get().reads(16))
+			.saturating_add(T::DbWeight::get().writes(4))
 	}
 	/// Storage: `ParaInherent::Included` (r:1 w:1)
 	/// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
@@ -214,10 +211,12 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
 	/// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1)
 	/// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
+	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
 	/// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1)
-	/// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
+	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0)
 	/// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0)
@@ -248,12 +247,8 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0)
 	/// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
-	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
-	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
-	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0)
+	/// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Paras::CurrentCodeHash` (r:1 w:0)
 	/// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Paras::ParaLifecycles` (r:1 w:0)
@@ -264,6 +259,8 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Session::DisabledValidators` (r:1 w:0)
 	/// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
+	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasDisputes::Included` (r:0 w:1)
 	/// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1)
@@ -277,15 +274,15 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// The range of component `v` is `[2, 5]`.
 	fn enter_backed_candidates_variable(v: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `76369`
-		//  Estimated: `82309`
-		// Minimum execution time: 1_468_919_000 picoseconds.
-		Weight::from_parts(1_433_315_477, 0)
-			.saturating_add(Weight::from_parts(0, 82309))
-			// Standard Error: 419_886
-			.saturating_add(Weight::from_parts(42_880_485, 0).saturating_mul(v.into()))
+		//  Measured:  `76491`
+		//  Estimated: `82431`
+		// Minimum execution time: 1_496_985_000 picoseconds.
+		Weight::from_parts(1_466_448_265, 0)
+			.saturating_add(Weight::from_parts(0, 82431))
+			// Standard Error: 403_753
+			.saturating_add(Weight::from_parts(44_015_233, 0).saturating_mul(v.into()))
 			.saturating_add(T::DbWeight::get().reads(29))
-			.saturating_add(T::DbWeight::get().writes(16))
+			.saturating_add(T::DbWeight::get().writes(15))
 	}
 	/// Storage: `ParaInherent::Included` (r:1 w:1)
 	/// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
@@ -293,10 +290,12 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
 	/// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1)
 	/// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
+	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
 	/// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1)
-	/// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
+	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0)
 	/// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0)
@@ -327,12 +326,8 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0)
 	/// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0)
-	/// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1)
-	/// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
-	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0)
+	/// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Paras::CurrentCodeHash` (r:1 w:0)
 	/// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Paras::FutureCodeHash` (r:1 w:0)
@@ -347,6 +342,8 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `Session::DisabledValidators` (r:1 w:0)
 	/// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1)
+	/// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `ParasDisputes::Included` (r:0 w:1)
 	/// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1)
@@ -359,12 +356,12 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::paras_inherent::Weigh
 	/// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn enter_backed_candidate_code_upgrade() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `76382`
-		//  Estimated: `82322`
-		// Minimum execution time: 34_577_233_000 picoseconds.
-		Weight::from_parts(39_530_352_000, 0)
-			.saturating_add(Weight::from_parts(0, 82322))
+		//  Measured:  `76504`
+		//  Estimated: `82444`
+		// Minimum execution time: 40_136_167_000 picoseconds.
+		Weight::from_parts(41_572_376_000, 0)
+			.saturating_add(Weight::from_parts(0, 82444))
 			.saturating_add(T::DbWeight::get().reads(31))
-			.saturating_add(T::DbWeight::get().writes(16))
+			.saturating_add(T::DbWeight::get().writes(15))
 	}
 }
diff --git a/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl
index b8b8887df85782268735f906eab771ddfcc22fcf..8f883dffa5e1091c32e44a6d319c6512ec316fff 100644
--- a/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl
+++ b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl
@@ -5,8 +5,8 @@ Creds: config
 validator: reports node_roles is 4
 
 # register paras 2 by 2 to speed up the test. registering all at once will exceed the weight limit.
-validator-0: js-script ./0015-force-register-paras.js with "2000,2001" return is 0 within 600 seconds
-validator-0: js-script ./0015-force-register-paras.js with "2002,2003" return is 0 within 600 seconds
+validator-0: js-script ./force-register-paras.js with "2000,2001" return is 0 within 600 seconds
+validator-0: js-script ./force-register-paras.js with "2002,2003" return is 0 within 600 seconds
 # assign core 0 to be shared by all paras.
 validator-0: js-script ./assign-core.js with "0,2000,14400,2001,14400,2002,14400,2003,14400" return is 0 within 600 seconds
 
diff --git a/polkadot/zombienet_tests/functional/0017-sync-backing.toml b/polkadot/zombienet_tests/functional/0017-sync-backing.toml
new file mode 100644
index 0000000000000000000000000000000000000000..2550054c8dadaf75b34b4c4a50f402576f3f5266
--- /dev/null
+++ b/polkadot/zombienet_tests/functional/0017-sync-backing.toml
@@ -0,0 +1,48 @@
+[settings]
+timeout = 1000
+
+[relaychain]
+default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
+chain = "rococo-local"
+
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params]
+  max_candidate_depth = 0
+  allowed_ancestry_len = 0
+
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
+  lookahead = 2
+  group_rotation_frequency = 4
+
+[relaychain.default_resources]
+limits = { memory = "4G", cpu = "2" }
+requests = { memory = "2G", cpu = "1" }
+
+  [[relaychain.node_groups]]
+  name = "alice"
+  args = [ "-lparachain=debug" ]
+  count = 10
+
+[[parachains]]
+id = 2000
+addToGenesis = true
+
+  [parachains.collator]
+  name = "collator01"
+  image = "{{COL_IMAGE}}"
+  command = "adder-collator"
+  args = ["-lparachain=debug"]
+
+[[parachains]]
+id = 2001
+cumulus_based = true
+
+  [parachains.collator]
+  name = "collator02"
+  image = "{{CUMULUS_IMAGE}}"
+  command = "polkadot-parachain"
+  args = ["-lparachain=debug"]
+
+[types.Header]
+number = "u64"
+parent_hash = "Hash"
+post_state = "Hash"
\ No newline at end of file
diff --git a/polkadot/zombienet_tests/functional/0017-sync-backing.zndsl b/polkadot/zombienet_tests/functional/0017-sync-backing.zndsl
new file mode 100644
index 0000000000000000000000000000000000000000..a53de784b2d1349fa890ce51984b6865c2c94fd5
--- /dev/null
+++ b/polkadot/zombienet_tests/functional/0017-sync-backing.zndsl
@@ -0,0 +1,22 @@
+Description: Test that we are producing 12-second parachain blocks if sync backing is configured
+Network: ./0017-sync-backing.toml
+Creds: config
+
+# Check authority status.
+alice: reports node_roles is 4
+
+# Ensure parachains are registered.
+alice: parachain 2000 is registered within 60 seconds
+alice: parachain 2001 is registered within 60 seconds
+
+# Ensure parachains made progress.
+alice: reports substrate_block_height{status="finalized"} is at least 10 within 100 seconds
+
+# This parachain should produce blocks at a 12s clip; let's assume a 14s rate, allowing for
+# some slots to be missed on slower machines
+alice: parachain 2000 block height is at least 21 within 300 seconds
+alice: parachain 2000 block height is lower than 25 within 2 seconds
+
+# This should already have produced the needed blocks
+alice: parachain 2001 block height is at least 21 within 10 seconds
+alice: parachain 2001 block height is lower than 25 within 2 seconds
diff --git a/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml b/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml
new file mode 100644
index 0000000000000000000000000000000000000000..745c4f9e24b1bbcd79608a3ddfc20d8ea833be6e
--- /dev/null
+++ b/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml
@@ -0,0 +1,39 @@
+[settings]
+timeout = 1000
+
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
+  max_validators_per_core = 2
+  lookahead = 2
+  num_cores = 4
+  group_rotation_frequency = 4
+
+
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params]
+  needed_approvals = 3
+
+[relaychain]
+default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
+chain = "rococo-local"
+command = "polkadot"
+
+  [[relaychain.node_groups]]
+  name = "validator"
+  args = ["-lruntime=debug,parachain=debug"]
+  count = 4
+
+[[parachains]]
+id = 2000
+register_para = false
+onboard_as_parachain = false
+add_to_genesis = false
+chain = "glutton-westend-local-2000"
+    [parachains.genesis.runtimeGenesis.patch.glutton]
+    compute = "50000000"
+    storage = "2500000000"
+    trashDataCount = 5120
+
+    [parachains.collator]
+    name = "collator-2000"
+    image = "{{CUMULUS_IMAGE}}"
+    command = "polkadot-parachain"
+    args = ["-lparachain=debug"]
diff --git a/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.zndsl b/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.zndsl
new file mode 100644
index 0000000000000000000000000000000000000000..80ecf6ae1b9be635d904733ec07a2b71f652c0cd
--- /dev/null
+++ b/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.zndsl
@@ -0,0 +1,11 @@
+Description: Test that a parachain can keep producing blocks even if the other parachain with which it's sharing a core doesn't
+Network: ./0018-shared-core-idle-parachain.toml
+Creds: config
+
+validator: reports node_roles is 4
+
+validator-0: js-script ./force-register-paras.js with "2000" return is 0 within 600 seconds
+# assign core 0 to be shared by two paras, but only one exists
+validator-0: js-script ./assign-core.js with "0,2000,28800,2001,28800" return is 0 within 600 seconds
+
+collator-2000: reports block height is at least 10 within 180 seconds
diff --git a/polkadot/zombienet_tests/functional/0015-force-register-paras.js b/polkadot/zombienet_tests/functional/force-register-paras.js
similarity index 100%
rename from polkadot/zombienet_tests/functional/0015-force-register-paras.js
rename to polkadot/zombienet_tests/functional/force-register-paras.js
diff --git a/prdoc/pr_5461.prdoc b/prdoc/pr_5461.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..bf343216e29ba14c09434c557aa838c40ffce6e7
--- /dev/null
+++ b/prdoc/pr_5461.prdoc
@@ -0,0 +1,20 @@
+title: "runtime: remove ttl"
+
+doc:
+  - audience: [Runtime Dev, Node Dev]
+    description: |
+      Resolves https://github.com/paritytech/polkadot-sdk/issues/4776. Removes the scheduling ttl used in the relay chain
+      runtimes, as well as the availability timeout retries. The extrinsics for configuring these two values are also removed.
+      Deprecates the `ttl` and `max_availability_timeouts` fields of the `HostConfiguration` primitive.
+
+crates:
+  - name: polkadot-runtime-parachains
+    bump: major
+  - name: polkadot-primitives
+    bump: major
+  - name: rococo-runtime
+    bump: major
+  - name: westend-runtime
+    bump: major
+  - name: polkadot
+    bump: none