From c63b557e502fbdf6454797d933147df0f2f4a82b Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Wed, 19 Jul 2023 15:06:58 +0300 Subject: [PATCH] paras: count upgrade delay from inclusion (#7486) * paras: count upgrade delay from inclusion * fix warning * rename check cause block number field * rename inclusion_parent -> included_at --- .../runtime/parachains/src/inclusion/mod.rs | 5 +- .../runtime/parachains/src/inclusion/tests.rs | 142 +++++++++++++++++- polkadot/runtime/parachains/src/paras/mod.rs | 49 ++++-- 3 files changed, 178 insertions(+), 18 deletions(-) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index ffa74487c08..c71657d1ac4 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -884,10 +884,13 @@ impl<T: Config> Pallet<T> { // initial weight is config read. let mut weight = T::DbWeight::get().reads_writes(1, 0); if let Some(new_code) = commitments.new_validation_code { + // Block number of candidate's inclusion. + let now = <frame_system::Pallet<T>>::block_number(); + weight.saturating_add(<paras::Pallet<T>>::schedule_code_upgrade( receipt.descriptor.para_id, new_code, - relay_parent_number, + now, &config, )); } diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index d921c000b14..59f5e9414d0 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -28,6 +28,7 @@ use crate::{ }; use primitives::{SignedAvailabilityBitfields, UncheckedSignedAvailabilityBitfields}; +use assert_matches::assert_matches; use frame_support::assert_noop; use keyring::Sr25519Keyring; use parity_scale_codec::DecodeAll; @@ -44,7 +45,8 @@ use test_helpers::{dummy_collator, dummy_collator_signature, dummy_validation_co fn default_config() -> HostConfiguration<BlockNumber> { let mut config = HostConfiguration::default(); config.parathread_cores = 1; - config.max_code_size = 3; + config.max_code_size = 0b100000; + config.max_head_data_size = 0b100000; config } @@ -1902,3 +1904,141 @@ fn aggregate_origin_decode_regression_check() { let decoded = AggregateMessageOrigin::decode_all(&mut &raw[..]); assert_eq!(decoded, Ok(ump), "Migration needed for AggregateMessageOrigin"); } + +#[test] +fn para_upgrade_delay_scheduled_from_inclusion() { + let chain_a = ParaId::from(1_u32); + + // The block number of the relay-parent for testing. + const RELAY_PARENT_NUM: BlockNumber = 4; + + let paras = vec![(chain_a, ParaKind::Parachain)]; + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Ferdie, + ]; + let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory()); + for validator in validators.iter() { + Keystore::sr25519_generate_new( + &*keystore, + PARACHAIN_KEY_TYPE_ID, + Some(&validator.to_seed()), + ) + .unwrap(); + } + let validator_public = validator_pubkeys(&validators); + + new_test_ext(genesis_config(paras)).execute_with(|| { + shared::Pallet::<Test>::set_active_validators_ascending(validator_public.clone()); + shared::Pallet::<Test>::set_session_index(5); + + let new_validation_code: ValidationCode = vec![1, 2, 3, 4, 5].into(); + let new_validation_code_hash = new_validation_code.hash(); + + // Otherwise upgrade is no-op. 
+ assert_ne!(new_validation_code, dummy_validation_code()); + + run_to_block(5, |_| None); + + let signing_context = + SigningContext { parent_hash: System::parent_hash(), session_index: 5 }; + + let group_validators = |group_index: GroupIndex| { + match group_index { + group_index if group_index == GroupIndex::from(0) => Some(vec![0, 1, 2, 3, 4]), + _ => panic!("Group index out of bounds for 1 parachain"), + } + .map(|vs| vs.into_iter().map(ValidatorIndex).collect::<Vec<_>>()) + }; + + let core_lookup = |core| match core { + core if core == CoreIndex::from(0) => Some(chain_a), + _ => None, + }; + + let chain_a_assignment = CoreAssignment { + core: CoreIndex::from(0), + para_id: chain_a, + kind: AssignmentKind::Parachain, + group_idx: GroupIndex::from(0), + }; + + let mut candidate_a = TestCandidateBuilder { + para_id: chain_a, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(1), + persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), + new_validation_code: Some(new_validation_code.clone()), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_a); + + let backed_a = back_candidate( + candidate_a.clone(), + &validators, + group_validators(GroupIndex::from(0)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + ); + + let ProcessedCandidates { core_indices: occupied_cores, .. } = + ParaInclusion::process_candidates( + Default::default(), + vec![backed_a], + vec![chain_a_assignment.clone()], + &group_validators, + ) + .expect("candidates scheduled, in order, and backed"); + + assert_eq!(occupied_cores, vec![CoreIndex::from(0)]); + + // Run a couple of blocks before the inclusion. + run_to_block(7, |_| None); + + let mut bare_bitfield = default_bitfield(); + *bare_bitfield.0.get_mut(0).unwrap() = true; + + let signed_bitfields = validators + .iter() + .enumerate() + .map(|(i, key)| { + sign_bitfield( + &keystore, + key, + ValidatorIndex(i as _), + bare_bitfield.clone(), + &signing_context, + ) + .into() + }) + .collect::<Vec<_>>(); + + let checked_bitfields = simple_sanitize_bitfields( + signed_bitfields, + DisputedBitfield::zeros(expected_bits()), + expected_bits(), + ); + + let v = process_bitfields(expected_bits(), checked_bitfields, core_lookup); + assert_eq!(vec![(CoreIndex(0), candidate_a.hash())], v); + + assert!(<PendingAvailability<Test>>::get(&chain_a).is_none()); + assert!(<PendingAvailabilityCommitments<Test>>::get(&chain_a).is_none()); + + let active_vote_state = paras::Pallet::<Test>::active_vote_state(&new_validation_code_hash) + .expect("prechecking must be initiated"); + + let cause = &active_vote_state.causes()[0]; + // Upgrade block is the block of inclusion, not candidate's parent. + assert_matches!(cause, + paras::PvfCheckCause::Upgrade { id, included_at } + if id == &chain_a && included_at == &7 + ); + }); +} diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index c1a53579c35..98c5075a4c9 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -367,17 +367,21 @@ impl TypeInfo for ParaKind { /// This enum describes a reason why a particular PVF pre-checking vote was initiated. When the /// PVF vote in question is concluded, this enum indicates what changes should be performed. 
-#[derive(Encode, Decode, TypeInfo)]
-enum PvfCheckCause<BlockNumber> {
+#[derive(Debug, Encode, Decode, TypeInfo)]
+pub(crate) enum PvfCheckCause<BlockNumber> {
 	/// PVF vote was initiated by the initial onboarding process of the given para.
 	Onboarding(ParaId),
 	/// PVF vote was initiated by signalling of an upgrade by the given para.
 	Upgrade {
 		/// The ID of the parachain that initiated or is waiting for the conclusion of pre-checking.
 		id: ParaId,
-		/// The relay-chain block number that was used as the relay-parent for the parablock that
-		/// initiated the upgrade.
-		relay_parent_number: BlockNumber,
+		/// The relay-chain block number of **inclusion** of the candidate that initiated the upgrade.
+		///
+		/// It's important to count the upgrade enactment delay from the inclusion of this candidate
+		/// rather than from its relay parent, so that the PVF stays available in case of chain reversions.
+		///
+		/// See https://github.com/paritytech/polkadot/issues/4601 for a detailed explanation.
+		included_at: BlockNumber,
 	},
 }
 
@@ -400,7 +404,7 @@ enum PvfCheckOutcome {
 
 /// This struct describes the current state of an in-progress PVF pre-checking vote.
 #[derive(Encode, Decode, TypeInfo)]
-struct PvfCheckActiveVoteState<BlockNumber> {
+pub(crate) struct PvfCheckActiveVoteState<BlockNumber> {
 	// The two following vectors have their length equal to the number of validators in the active
 	// set. They start with all zeroes. A 1 is set at an index when the validator at the that index
 	// makes a vote. Once a 1 is set for either of the vectors, that validator cannot vote anymore.
@@ -466,6 +470,11 @@ impl<BlockNumber> PvfCheckActiveVoteState<BlockNumber> {
 			None
 		}
 	}
+
+	#[cfg(test)]
+	pub(crate) fn causes(&self) -> &[PvfCheckCause<BlockNumber>] {
+		self.causes.as_slice()
+	}
 }
 
 pub trait WeightInfo {
@@ -1473,9 +1482,8 @@ impl<T: Config> Pallet<T> {
 				PvfCheckCause::Onboarding(id) => {
 					weight += Self::proceed_with_onboarding(*id, sessions_observed);
 				},
-				PvfCheckCause::Upgrade { id, relay_parent_number } => {
-					weight +=
-						Self::proceed_with_upgrade(*id, code_hash, now, *relay_parent_number, cfg);
+				PvfCheckCause::Upgrade { id, included_at } => {
+					weight += Self::proceed_with_upgrade(*id, code_hash, now, *included_at, cfg);
 				},
 			}
 		}
@@ -1519,10 +1527,10 @@ impl<T: Config> Pallet<T> {
 		// against the new validation code.
 		//
 		// Here we are trying to choose the block number that will have `validation_upgrade_delay`
-		// blocks from the relay-parent of the block that schedule code upgrade but no less than
-		// `minimum_validation_upgrade_delay`. We want this delay out of caution so that when
-		// the last vote for pre-checking comes the parachain will have some time until the upgrade
-		// finally takes place.
+		// blocks from the inclusion of the candidate that scheduled the code upgrade, but no less
+		// than `minimum_validation_upgrade_delay`. We want this delay out of caution so that when
+		// the last vote for pre-checking comes in, the parachain will have some time until the
+		// upgrade finally takes place.
 		let expected_at = cmp::max(
 			relay_parent_number + cfg.validation_upgrade_delay,
 			now + cfg.minimum_validation_upgrade_delay,
@@ -1765,10 +1773,12 @@ impl<T: Config> Pallet<T> {
 	///
 	/// The new code should not be equal to the current one, otherwise the upgrade will be aborted.
 	/// If there is already a scheduled code upgrade for the para, this is a no-op.
+	///
+	/// The inclusion block number is the relay-chain block at which the candidate that initiated the upgrade was included.
pub(crate) fn schedule_code_upgrade( id: ParaId, new_code: ValidationCode, - relay_parent_number: BlockNumberFor<T>, + inclusion_block_number: BlockNumberFor<T>, cfg: &configuration::HostConfiguration<BlockNumberFor<T>>, ) -> Weight { let mut weight = T::DbWeight::get().reads(1); @@ -1810,7 +1820,7 @@ impl<T: Config> Pallet<T> { UpgradeRestrictionSignal::<T>::insert(&id, UpgradeRestriction::Present); weight += T::DbWeight::get().reads_writes(1, 1); - let next_possible_upgrade_at = relay_parent_number + cfg.validation_upgrade_cooldown; + let next_possible_upgrade_at = inclusion_block_number + cfg.validation_upgrade_cooldown; UpgradeCooldowns::<T>::mutate(|upgrade_cooldowns| { let insert_idx = upgrade_cooldowns .binary_search_by_key(&next_possible_upgrade_at, |&(_, b)| b) @@ -1819,7 +1829,7 @@ impl<T: Config> Pallet<T> { }); weight += Self::kick_off_pvf_check( - PvfCheckCause::Upgrade { id, relay_parent_number }, + PvfCheckCause::Upgrade { id, included_at: inclusion_block_number }, code_hash, new_code, cfg, @@ -2120,6 +2130,13 @@ impl<T: Config> Pallet<T> { Heads::<T>::insert(&id, &genesis_data.genesis_head); } + + #[cfg(test)] + pub(crate) fn active_vote_state( + code_hash: &ValidationCodeHash, + ) -> Option<PvfCheckActiveVoteState<BlockNumberFor<T>>> { + PvfActiveVoteMap::<T>::get(code_hash) + } } /// An overlay over the `Parachains` storage entry that provides a convenient interface for adding -- GitLab
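For readers skimming the patch, below is a minimal standalone sketch of the scheduling arithmetic after this change. The struct and helper are hypothetical stand-ins (not part of the patch); only the field names `validation_upgrade_delay`, `minimum_validation_upgrade_delay`, and `validation_upgrade_cooldown` mirror the host configuration values used in `schedule_code_upgrade` and `proceed_with_upgrade`. The point it illustrates is that both the enactment block and the cooldown are now counted from the block in which the upgrading candidate was included, not from its relay parent.

// Hypothetical, self-contained sketch of the post-change scheduling math.
struct UpgradeDelays {
    validation_upgrade_delay: u32,
    minimum_validation_upgrade_delay: u32,
    validation_upgrade_cooldown: u32,
}

// Returns `(expected_at, next_possible_upgrade_at)` for an upgrade whose candidate was
// included at relay-chain block `included_at`, with pre-checking concluding at block `now`.
fn upgrade_schedule(cfg: &UpgradeDelays, included_at: u32, now: u32) -> (u32, u32) {
    // Enactment: `validation_upgrade_delay` blocks after inclusion, but never sooner than
    // `minimum_validation_upgrade_delay` blocks after the pre-checking vote concludes.
    let expected_at = core::cmp::max(
        included_at + cfg.validation_upgrade_delay,
        now + cfg.minimum_validation_upgrade_delay,
    );
    // Cooldown before the next upgrade signal is accepted, also counted from inclusion.
    let next_possible_upgrade_at = included_at + cfg.validation_upgrade_cooldown;
    (expected_at, next_possible_upgrade_at)
}

fn main() {
    // Example: candidate included at block 7 (as in the new test), pre-checking concludes at block 10.
    let cfg = UpgradeDelays {
        validation_upgrade_delay: 5,
        minimum_validation_upgrade_delay: 2,
        validation_upgrade_cooldown: 10,
    };
    assert_eq!(upgrade_schedule(&cfg, 7, 10), (12, 17));
}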