	pub(crate) fn initializer_initialize(now: T::BlockNumber) -> Weight {
		let weight = Self::prune_old_code(now);
		weight + Self::process_scheduled_upgrade_changes(now)
	}

	/// Called by the initializer to finalize the paras pallet.
	pub(crate) fn initializer_finalize() {}

	/// Called by the initializer to note that a new session has started.
	/// Returns the list of outgoing paras from the actions queue.
	pub(crate) fn initializer_on_new_session(
		notification: &SessionChangeNotification<T::BlockNumber>,
	) -> Vec<ParaId> {
		let outgoing_paras = Self::apply_actions_queue(notification.session_index);
		Self::groom_ongoing_pvf_votes(&notification.new_config, notification.validators.len());
		outgoing_paras
	}

	/// The validation code of a live para.
	pub(crate) fn current_code(para_id: &ParaId) -> Option<ValidationCode> {
		Self::current_code_hash(para_id).and_then(|code_hash| {
			let code = CodeByHash::<T>::get(&code_hash);
			if code.is_none() {
				log::error!(
					target: LOG_TARGET,
					"Pallet paras storage is inconsistent, code not found for hash {}",
					code_hash,
				);
				debug_assert!(false, "inconsistent paras storages");
			}
			code
		})
	}

	// Apply all para actions queued for the given session index.
	//
	// The actions to take are based on the lifecycle of the paras.
	//
	// The final state of any para after the actions queue should be a stable one:
	// either a parachain, a parathread, or not registered.
	//
	// Returns the list of outgoing paras from the actions queue.
	fn apply_actions_queue(session: SessionIndex) -> Vec<ParaId> {
		let actions = ActionsQueue::<T>::take(session);
		let mut parachains = <Self as Store>::Parachains::get();
		let now = <frame_system::Pallet<T>>::block_number();
		let mut outgoing = Vec::new();

		for para in actions {
			let lifecycle = ParaLifecycles::<T>::get(&para);
			match lifecycle {
				None | Some(ParaLifecycle::Parathread) | Some(ParaLifecycle::Parachain) => { /* Nothing to do... */
				},
				},
				Some(ParaLifecycle::Onboarding) => {
					if let Some(genesis_data) = <Self as Store>::UpcomingParasGenesis::take(&para) {
						if genesis_data.parachain {
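							// `binary_search` returns `Err` with the in-order insertion
							// point when the para is absent, so inserting there keeps
							// `parachains` sorted.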
							if let Err(i) = parachains.binary_search(&para) {
								parachains.insert(i, para);
							}
							ParaLifecycles::<T>::insert(&para, ParaLifecycle::Parachain);
						} else {
							ParaLifecycles::<T>::insert(&para, ParaLifecycle::Parathread);
						}

						// HACK: see the notice in `schedule_para_initialize`.
						//
						// Apparently, this is left over from a prior version of the runtime.
						// To handle this we just insert the code and link the current code hash
						// to it.
						if !genesis_data.validation_code.0.is_empty() {
							let code_hash = genesis_data.validation_code.hash();
							Self::increase_code_ref(&code_hash, &genesis_data.validation_code);
							<Self as Store>::CurrentCodeHash::insert(&para, code_hash);
						}

						<Self as Store>::Heads::insert(&para, genesis_data.genesis_head);
					}
				},
				// Upgrade a parathread to a parachain
				Some(ParaLifecycle::UpgradingParathread) => {
					if let Err(i) = parachains.binary_search(&para) {
						parachains.insert(i, para);
					}
					ParaLifecycles::<T>::insert(&para, ParaLifecycle::Parachain);
				},
				// Downgrade a parachain to a parathread
				Some(ParaLifecycle::DowngradingParachain) => {
					if let Ok(i) = parachains.binary_search(&para) {
						parachains.remove(i);
					}
					ParaLifecycles::<T>::insert(&para, ParaLifecycle::Parathread);
				},
				// Offboard a parathread or parachain from the system
				Some(ParaLifecycle::OffboardingParachain) |
				Some(ParaLifecycle::OffboardingParathread) => {
					if let Ok(i) = parachains.binary_search(&para) {
						parachains.remove(i);
					}

					<Self as Store>::Heads::remove(&para);
					<Self as Store>::FutureCodeUpgrades::remove(&para);
					<Self as Store>::UpgradeGoAheadSignal::remove(&para);
					<Self as Store>::UpgradeRestrictionSignal::remove(&para);
					ParaLifecycles::<T>::remove(&para);
					let removed_future_code_hash = <Self as Store>::FutureCodeHash::take(&para);
					if let Some(removed_future_code_hash) = removed_future_code_hash {
						Self::decrease_code_ref(&removed_future_code_hash);
					}
					let removed_code_hash = <Self as Store>::CurrentCodeHash::take(&para);
					if let Some(removed_code_hash) = removed_code_hash {
						Self::note_past_code(para, now, now, removed_code_hash);
					}

					outgoing.push(para);
				},
			}
		}

		if !outgoing.is_empty() {
			// Filter offboarded parachains from the upcoming upgrades and upgrade cooldowns list.
			//
			// We do it after the offboarding to get away with only a single read/write per list.
			//
			// NOTE: both of these iterate over the full list as well as over `outgoing`. We do
			//       not expect either of them to be large, so this should be fine.
			<Self as Store>::UpcomingUpgrades::mutate(|upcoming_upgrades| {
				*upcoming_upgrades = mem::take(upcoming_upgrades)
					.into_iter()
					.filter(|&(ref para, _)| !outgoing.contains(para))
					.collect();
			});
			<Self as Store>::UpgradeCooldowns::mutate(|upgrade_cooldowns| {
				*upgrade_cooldowns = mem::take(upgrade_cooldowns)
					.into_iter()
					.filter(|&(ref para, _)| !outgoing.contains(para))
					.collect();
			});
		}

		// Place the new parachains set in storage.
		<Self as Store>::Parachains::set(parachains);

		return outgoing
	}

	// Note replacement of the code of para with given `id`, which occurred in the
	// context of the given relay-chain block number. Provide the replaced code.
	//
	// `at` for para-triggered replacement is the block number of the relay-chain
	// block in whose context the parablock was executed
	// (i.e. number of `relay_parent` in the receipt)
	fn note_past_code(
		id: ParaId,
		at: T::BlockNumber,
		now: T::BlockNumber,
		old_code_hash: ValidationCodeHash,
	) -> Weight {
		<Self as Store>::PastCodeMeta::mutate(&id, |past_meta| {
			past_meta.note_replacement(at, now);
		});

		<Self as Store>::PastCodeHash::insert(&(id, at), old_code_hash);

		// Schedule pruning for this past-code to be removed as soon as it
		// exits the slashing window.
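		//
		// For example, with `code_retention_period = R`, an entry noted at block `b` is
		// removed at the first block `n` where `b <= n - (R + 1)` (see `prune_old_code`).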
		<Self as Store>::PastCodePruning::mutate(|pruning| {
			let insert_idx =
				pruning.binary_search_by_key(&now, |&(_, b)| b).unwrap_or_else(|idx| idx);
			pruning.insert(insert_idx, (id, now));
		});

		T::DbWeight::get().reads_writes(2, 3)
	}

	// Looks at old code metadata, compares it to the current acceptance window, and prunes
	// those that are too old.
	fn prune_old_code(now: T::BlockNumber) -> Weight {
		let config = configuration::Pallet::<T>::config();
		let code_retention_period = config.code_retention_period;
		if now <= code_retention_period {
			let weight = T::DbWeight::get().reads_writes(1, 0);
			return weight
		}

		// The height of any changes we should no longer keep around.
		let pruning_height = now - (code_retention_period + One::one());
		let pruning_tasks_done = <Self as Store>::PastCodePruning::mutate(
			|pruning_tasks: &mut Vec<(_, T::BlockNumber)>| {
				let (pruning_tasks_done, pruning_tasks_to_do) = {
					// find all past code that has just exited the pruning window.
					let up_to_idx =
						pruning_tasks.iter().take_while(|&(_, at)| at <= &pruning_height).count();
					(up_to_idx, pruning_tasks.drain(..up_to_idx))
				};

				for (para_id, _) in pruning_tasks_to_do {
					let full_deactivate = <Self as Store>::PastCodeMeta::mutate(&para_id, |meta| {
						for pruned_repl_at in meta.prune_up_to(pruning_height) {
							let removed_code_hash =
								<Self as Store>::PastCodeHash::take(&(para_id, pruned_repl_at));

							if let Some(removed_code_hash) = removed_code_hash {
								Self::decrease_code_ref(&removed_code_hash);
							} else {
								log::warn!(
									target: LOG_TARGET,
									"Missing code for removed hash {:?}",
									removed_code_hash,
								);
							}
						meta.is_empty() && Self::para_head(&para_id).is_none()
					});

					// This parachain has been removed and now the vestigial code
					// has been removed from the state. Clean up the meta as well.
					if full_deactivate {
						<Self as Store>::PastCodeMeta::remove(&para_id);
					}
				}

				pruning_tasks_done as u64
			},
		);

		// 1 read for the config, plus 1 read of the meta for each pruning task;
		// 2 writes per pruning task: updating the meta and pruning the code.
		T::DbWeight::get().reads_writes(1 + pruning_tasks_done, 2 * pruning_tasks_done)
	}

	/// Process the timers related to upgrades. Specifically, the toggling of the upgrade
	/// go-ahead signals and the expiry of upgrade cooldown restrictions.
	///
	/// Takes the current block number and returns the weight consumed.
	fn process_scheduled_upgrade_changes(now: T::BlockNumber) -> Weight {
		let upgrades_signaled = <Self as Store>::UpcomingUpgrades::mutate(
			|upcoming_upgrades: &mut Vec<(ParaId, T::BlockNumber)>| {
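				// The vector is kept sorted ascending by block number at insertion time, so
				// all due entries sit at the front and a single `take_while` finds them.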
				let num = upcoming_upgrades.iter().take_while(|&(_, at)| at <= &now).count();
				for (para, _) in upcoming_upgrades.drain(..num) {
					<Self as Store>::UpgradeGoAheadSignal::insert(&para, UpgradeGoAhead::GoAhead);
				}
				num
			},
		);
		let cooldowns_expired = <Self as Store>::UpgradeCooldowns::mutate(
			|upgrade_cooldowns: &mut Vec<(ParaId, T::BlockNumber)>| {
				let num = upgrade_cooldowns.iter().take_while(|&(_, at)| at <= &now).count();
				for (para, _) in upgrade_cooldowns.drain(..num) {
					<Self as Store>::UpgradeRestrictionSignal::remove(&para);
				}
				num
			},
		);

		T::DbWeight::get().reads_writes(2, upgrades_signaled as u64 + cooldowns_expired as u64)
	}

	/// Goes over all PVF votes in progress, reinitializes ballots, increments ages and prunes the
	/// active votes that reached their time-to-live.
	fn groom_ongoing_pvf_votes(
		cfg: &configuration::HostConfiguration<T::BlockNumber>,
		new_n_validators: usize,
	) -> Weight {
		let mut weight = T::DbWeight::get().reads(1);

		let potentially_active_votes = PvfActiveVoteList::<T>::get();

		// Initially empty list which contains all the PVF active votes that made it through this
		// session change.
		//
	// **Ordered** the same as `PvfActiveVoteList`.
		let mut actually_active_votes = Vec::with_capacity(potentially_active_votes.len());

		for vote_subject in potentially_active_votes {
			let mut vote_state = match PvfActiveVoteMap::<T>::take(&vote_subject) {
				Some(v) => v,
				None => {
					// This branch should never be reached. This is due to the fact that the set of
					// `PvfActiveVoteMap`'s keys is always equal to the set of items found in
					// `PvfActiveVoteList`.
					log::warn!(
						target: LOG_TARGET,
						"The PvfActiveVoteMap is out of sync with PvfActiveVoteList!",
					);
					debug_assert!(false);
					continue
				},
			};

			vote_state.age += 1;
			if vote_state.age < cfg.pvf_voting_ttl {
				weight += T::DbWeight::get().writes(1);
				vote_state.reinitialize_ballots(new_n_validators);
				PvfActiveVoteMap::<T>::insert(&vote_subject, vote_state);

				// push maintaining the original order.
				actually_active_votes.push(vote_subject);
			} else {
				// TTL is reached. Reject.
				weight += Self::enact_pvf_rejected(&vote_subject, vote_state.causes);
			}
		}

		weight += T::DbWeight::get().writes(1);
		PvfActiveVoteList::<T>::put(actually_active_votes);

		weight
	}

	fn enact_pvf_accepted(
		now: T::BlockNumber,
		code_hash: &ValidationCodeHash,
		causes: &[PvfCheckCause<T::BlockNumber>],
		sessions_observed: SessionIndex,
		cfg: &configuration::HostConfiguration<T::BlockNumber>,
	) -> Weight {
		let mut weight = 0;
		for cause in causes {
			match cause {
				PvfCheckCause::Onboarding(id) => {
					weight += Self::proceed_with_onboarding(*id, sessions_observed);
				},
				PvfCheckCause::Upgrade { id, relay_parent_number } => {
					weight +=
						Self::proceed_with_upgrade(*id, code_hash, now, *relay_parent_number, cfg);
				},
			}
		}
		weight
	}

	fn proceed_with_onboarding(id: ParaId, sessions_observed: SessionIndex) -> Weight {
		let weight = T::DbWeight::get().reads_writes(2, 1);

		// we should onboard only after `SESSION_DELAY` sessions but we should take
		// into account the number of sessions the PVF pre-checking occupied.
		//
		// we cannot onboard at the current session, so it must be at least one
		// session ahead.
		let onboard_at: SessionIndex = shared::Pallet::<T>::session_index() +
			cmp::max(shared::SESSION_DELAY.saturating_sub(sessions_observed), 1);
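		// For example, if `SESSION_DELAY` is 2: a fast-tracked PVF (0 sessions observed)
		// onboards 2 sessions ahead, one that spent a single session in pre-checking onboards
		// at the next session, and anything longer still lands no earlier than the next session.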

		ActionsQueue::<T>::mutate(onboard_at, |v| {
			if let Err(i) = v.binary_search(&id) {
				v.insert(i, id);
			}
		});

		weight
	}

	fn proceed_with_upgrade(
		id: ParaId,
		code_hash: &ValidationCodeHash,
		now: T::BlockNumber,
		relay_parent_number: T::BlockNumber,
		cfg: &configuration::HostConfiguration<T::BlockNumber>,
	) -> Weight {
		let mut weight = 0;

		// Compute the relay-chain block number starting at which the code upgrade is ready to be
		// applied.
		//
		// The first parablock with a relay-parent at or above the height of `expected_at` will
		// trigger the code upgrade. The parablock that comes after that will be validated
		// against the new validation code.
		//
		// Here we choose the block number that is `validation_upgrade_delay` blocks after the
		// relay-parent of the block that scheduled the code upgrade, but no less than
		// `minimum_validation_upgrade_delay` blocks from now. We want this delay out of caution,
		// so that when the last pre-checking vote arrives the parachain still has some time
		// until the upgrade finally takes place.
		let expected_at = cmp::max(
			relay_parent_number + cfg.validation_upgrade_delay,
			now + cfg.minimum_validation_upgrade_delay,
		);
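		// For example (hypothetical numbers): with `validation_upgrade_delay = 5` and
		// `minimum_validation_upgrade_delay = 2`, an upgrade signalled at relay-parent 100
		// whose final pre-checking vote arrives at block 104 yields
		// `expected_at = max(100 + 5, 104 + 2) = 106`.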

		weight += T::DbWeight::get().reads_writes(1, 4);
		FutureCodeUpgrades::<T>::insert(&id, expected_at);

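		// Keep `UpcomingUpgrades` sorted by `expected_at` so that
		// `process_scheduled_upgrade_changes` can drain the due entries from the front.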
		<Self as Store>::UpcomingUpgrades::mutate(|upcoming_upgrades| {
			let insert_idx = upcoming_upgrades
				.binary_search_by_key(&expected_at, |&(_, b)| b)
				.unwrap_or_else(|idx| idx);
			upcoming_upgrades.insert(insert_idx, (id, expected_at));
		});

		let expected_at = expected_at.saturated_into();
		let log = ConsensusLog::ParaScheduleUpgradeCode(id, *code_hash, expected_at);
		<frame_system::Pallet<T>>::deposit_log(log.into());

		weight
	}

	fn enact_pvf_rejected(
		code_hash: &ValidationCodeHash,
		causes: Vec<PvfCheckCause<T::BlockNumber>>,
	) -> Weight {
		let mut weight = T::DbWeight::get().writes(1);

		for cause in causes {
			// Whenever PVF pre-checking is started or a new cause is added to it, the RC is bumped.
			// Now we need to unbump it.
			weight += Self::decrease_code_ref(code_hash);

			match cause {
				PvfCheckCause::Onboarding(id) => {
					// Here we need to undo everything that was done during `schedule_para_initialize`.
					// Essentially, the logic is similar to offboarding, with the exception that
					// before actual onboarding the parachain had no chance to schedule any upgrades.
					// Therefore we can skip all the upgrade-related storage items here.
					weight += T::DbWeight::get().writes(3);
					UpcomingParasGenesis::<T>::remove(&id);
					CurrentCodeHash::<T>::remove(&id);
					ParaLifecycles::<T>::remove(&id);
				},
				PvfCheckCause::Upgrade { id, .. } => {
					weight += T::DbWeight::get().writes(2);
					UpgradeGoAheadSignal::<T>::insert(&id, UpgradeGoAhead::Abort);
					FutureCodeHash::<T>::remove(&id);
				},
			}
		}

		weight
	}

	/// Verify that `schedule_para_initialize` can be called successfully.
	///
	/// Returns false if para is already registered in the system.
	pub fn can_schedule_para_initialize(id: &ParaId) -> bool {
		ParaLifecycles::<T>::get(id).is_none()
	}

	/// Schedule a para to be initialized. If the validation code is not already stored in the
	/// code storage, then a PVF pre-checking process will be initiated.
	/// Only after the PVF pre-checking succeeds can the para be onboarded. Note that calling this
	/// does not guarantee that the parachain will eventually be onboarded: onboarding will not
	/// happen if the PVF does not pass pre-checking.
	///
	/// The para ID should not already be active in this module. The validation code supplied in
	/// `genesis_data` must not be empty. If those conditions are not met, then the para cannot
	/// be onboarded.
	pub(crate) fn schedule_para_initialize(
		id: ParaId,
		mut genesis_data: ParaGenesisArgs,
	) -> DispatchResult {
		// Make sure parachain isn't already in our system and that the onboarding parameters are
		// valid.
		ensure!(Self::can_schedule_para_initialize(&id), Error::<T>::CannotOnboard);
		ensure!(!genesis_data.validation_code.0.is_empty(), Error::<T>::CannotOnboard);
		ParaLifecycles::<T>::insert(&id, ParaLifecycle::Onboarding);

		// HACK: here we are doing something nasty.
		//
		// In order to fix the [soaking issue] we insert the code eagerly here. When the onboarding
		// is finally enacted, we do not need to insert the code anymore. Therefore, there is no
		// reason for the validation code to be copied into the `ParaGenesisArgs`. We also do not
		// want to copy the validation code needlessly, to avoid adding more memory pressure.
		//
		// That said, we also want to preserve `ParaGenesisArgs` as it is, for now. There are two
		// reasons:
		//
		// - Doing it within the context of the PR that introduces this change is undesirable, since
		//   it is already a big change, and that change would require a migration. Moreover, if we
		//   run the new version of the runtime, there will be fewer things to worry about during
		//   the eventual proper migration.
		//
		// - This data type is already used for generating genesis, and changing it would probably
		//   introduce some unnecessary burden.
		//
		// So instead of going through it right now, we will do something sneaky. Specifically:
		//
		// - Insert the `CurrentCodeHash` now, instead of during the onboarding. That allows us to
		//   get rid of hashing the validation code upon onboarding.
		//
		// - Replace `validation_code` with a sentinel value: an empty vector. This should be fine
		//   as long as we do not allow registering parachains with empty code. At the moment of
		//   writing this is already the case.
		//
		// - An empty value is treated as a sign that the current code was already inserted
		//   during onboarding.
		//
		// This is only an intermediate solution and should be fixed in the foreseeable future.
		//
		// [soaking issue]: https://github.com/paritytech/polkadot/issues/3918
		let validation_code =
			mem::replace(&mut genesis_data.validation_code, ValidationCode(Vec::new()));
		UpcomingParasGenesis::<T>::insert(&id, genesis_data);
		let validation_code_hash = validation_code.hash();
		<Self as Store>::CurrentCodeHash::insert(&id, validation_code_hash);

		let cfg = configuration::Pallet::<T>::config();
		Self::kick_off_pvf_check(
			PvfCheckCause::Onboarding(id),
			validation_code_hash,
			validation_code,
			&cfg,
		);

		Ok(())
	}

	/// Schedule a para to be cleaned up at the start of the next session.
	/// Will return an error if either of the following is true:
	///
	/// - para is not a stable parachain or parathread (i.e. [`ParaLifecycle::is_stable`] is `false`)
	/// - para has a pending upgrade.
	///
	/// No-op if para is not registered at all.
	pub(crate) fn schedule_para_cleanup(id: ParaId) -> DispatchResult {
		// Disallow offboarding in case there is an upcoming upgrade.
		//
		// This is not a fundamental limitation but rather a simplification: it allows us to get
		// away without introducing additional logic for pruning and, more importantly, enacting
		// ongoing PVF pre-checking votes. It also removes some nasty edge cases.
		//
		// This implicitly assumes that the given para exists, i.e. its lifecycle is not `None`.
		if FutureCodeHash::<T>::contains_key(&id) {
			return Err(Error::<T>::CannotOffboard.into())
		}

		let lifecycle = ParaLifecycles::<T>::get(&id);
		match lifecycle {
			// If para is not registered, nothing to do!
			None => return Ok(()),
			Some(ParaLifecycle::Parathread) => {
				ParaLifecycles::<T>::insert(&id, ParaLifecycle::OffboardingParathread);
			},
			Some(ParaLifecycle::Parachain) => {
				ParaLifecycles::<T>::insert(&id, ParaLifecycle::OffboardingParachain);
			},
			_ => return Err(Error::<T>::CannotOffboard.into()),
		}

		let scheduled_session = Self::scheduled_session();
		ActionsQueue::<T>::mutate(scheduled_session, |v| {
			if let Err(i) = v.binary_search(&id) {
				v.insert(i, id);
			}
		});

		Ok(())
	}

	/// Schedule a parathread to be upgraded to a parachain.
	///
	/// Will return error if `ParaLifecycle` is not `Parathread`.
	pub(crate) fn schedule_parathread_upgrade(id: ParaId) -> DispatchResult {
		let scheduled_session = Self::scheduled_session();
		let lifecycle = ParaLifecycles::<T>::get(&id).ok_or(Error::<T>::NotRegistered)?;
		ensure!(lifecycle == ParaLifecycle::Parathread, Error::<T>::CannotUpgrade);

		ParaLifecycles::<T>::insert(&id, ParaLifecycle::UpgradingParathread);
		ActionsQueue::<T>::mutate(scheduled_session, |v| {
			if let Err(i) = v.binary_search(&id) {
				v.insert(i, id);
			}
		});

		Ok(())
	}

	/// Schedule a parachain to be downgraded to a parathread.
	///
	/// Will return an error if `ParaLifecycle` is not `Parachain`.
	pub(crate) fn schedule_parachain_downgrade(id: ParaId) -> DispatchResult {
		let scheduled_session = Self::scheduled_session();
		let lifecycle = ParaLifecycles::<T>::get(&id).ok_or(Error::<T>::NotRegistered)?;
		ensure!(lifecycle == ParaLifecycle::Parachain, Error::<T>::CannotDowngrade);

		ParaLifecycles::<T>::insert(&id, ParaLifecycle::DowngradingParachain);
		ActionsQueue::<T>::mutate(scheduled_session, |v| {
			if let Err(i) = v.binary_search(&id) {
				v.insert(i, id);
			}
		});

		Ok(())
	}

	/// Schedule a future code upgrade of the given parachain.
	///
	/// If the new code is not known, then the PVF pre-checking will be started for that validation
	/// code. In case the validation code does not pass the PVF pre-checking process, the
	/// upgrade will be aborted.
	///
	/// Only after the code is approved by the process can the upgrade be scheduled. Specifically,
	/// the relay-chain block number will be determined at which the upgrade will take place. We
	/// call that block `expected_at`.
	///
	/// Once the candidate with the relay-parent >= `expected_at` is enacted, the new validation code
	/// will be applied. Therefore, the new code will be used to validate the next candidate.
	///
	/// The new code should not be equal to the current one, otherwise the upgrade will be aborted.
	/// If there is already a scheduled code upgrade for the para, this is a no-op.
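	///
	/// An illustrative timeline (hypothetical numbers, ignoring
	/// `minimum_validation_upgrade_delay` for simplicity): an upgrade signalled at relay-parent
	/// 100 with `validation_upgrade_delay = 5` becomes expected at block 105; the first parablock
	/// built on a relay-parent at or past 105 triggers the switch, and the parablock after it is
	/// validated against the new code.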
	pub(crate) fn schedule_code_upgrade(
		id: ParaId,
		new_code: ValidationCode,
		relay_parent_number: T::BlockNumber,
		cfg: &configuration::HostConfiguration<T::BlockNumber>,
	) -> Weight {
		let mut weight = T::DbWeight::get().reads(1);

		// Enacting this should be prevented by `can_upgrade_validation_code`
		if FutureCodeHash::<T>::contains_key(&id) {
			// This branch should never be reached. Signalling an upgrade is disallowed for a para
			// that already has one upgrade scheduled.
			//
			// Any candidate that attempts to do that should be rejected by
			// `can_upgrade_validation_code`.
			//
			// NOTE: we cannot set `UpgradeGoAheadSignal` signal here since this will be reset by
			//       the following call `note_new_head`
			log::warn!(target: LOG_TARGET, "ended up scheduling an upgrade while one is pending",);
		// para signals an update to the same code? This does not make a lot of sense, so abort the
		// process right away.
		//
		// We do not want to allow this since it will mess with the code reference counting.
		weight += T::DbWeight::get().reads(1);
		if CurrentCodeHash::<T>::get(&id) == Some(code_hash) {
			// NOTE: we cannot set `UpgradeGoAheadSignal` signal here since this will be reset by
			//       the following call `note_new_head`
			log::warn!(
				target: LOG_TARGET,
				"para tried to upgrade to the same code. Abort the upgrade",
			);
			return weight
		}
		// This is the start of the upgrade process. Prevent any further attempts at upgrading.
		weight += T::DbWeight::get().writes(2);
		FutureCodeHash::<T>::insert(&id, &code_hash);
		UpgradeRestrictionSignal::<T>::insert(&id, UpgradeRestriction::Present);

		weight += T::DbWeight::get().reads_writes(1, 1);
		let next_possible_upgrade_at = relay_parent_number + cfg.validation_upgrade_frequency;
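		// Keep `UpgradeCooldowns` sorted by the expiry block so that expired cooldowns can be
		// drained from the front in `process_scheduled_upgrade_changes`.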
		<Self as Store>::UpgradeCooldowns::mutate(|upgrade_cooldowns| {
			let insert_idx = upgrade_cooldowns
				.binary_search_by_key(&next_possible_upgrade_at, |&(_, b)| b)
				.unwrap_or_else(|idx| idx);
			upgrade_cooldowns.insert(insert_idx, (id, next_possible_upgrade_at));
		});
		weight += Self::kick_off_pvf_check(
			PvfCheckCause::Upgrade { id, relay_parent_number },
			code_hash,
			new_code,
			cfg,
		);
		weight
	}

	/// Makes sure that the given code hash has passed pre-checking.
	///
	/// If the given code hash has already passed pre-checking, then the approval happens
	/// immediately. Similarly, if pre-checking is turned off, the upgrade is scheduled
	/// immediately as well. In that case the behavior matches the previous, purely time-based
	/// upgrade sequence.
	///
	/// If the code is unknown, but the pre-checking for that PVF is already running then we perform
	/// "coalescing". We save the cause for this PVF pre-check request and just add it to the
	/// existing active PVF vote.
	///
	/// And finally, if the code is unknown and pre-checking is not running, we start the
	/// pre-checking process anew.
	///
	/// Unconditionally increases the reference count for the passed `code`.
	fn kick_off_pvf_check(
		cause: PvfCheckCause<T::BlockNumber>,
		code_hash: ValidationCodeHash,
		code: ValidationCode,
		cfg: &configuration::HostConfiguration<T::BlockNumber>,
	) -> Weight {
		let mut weight = 0;

		weight += T::DbWeight::get().reads(1);
		match PvfActiveVoteMap::<T>::get(&code_hash) {
			None => {
				// We deliberately are using `CodeByHash` here instead of the `CodeByHashRefs`. This
				// is because the code may have been added by `add_trusted_validation_code`.
				let known_code = CodeByHash::<T>::contains_key(&code_hash);
				weight += T::DbWeight::get().reads(1);

				if !cfg.pvf_checking_enabled || known_code {
					// Either:
					// - the code is known and there is no active PVF vote for it meaning it is
					//   already checked, or
					// - the PVF checking is disabled
					// In any case: fast track the PVF checking into the accepted state
					weight += T::DbWeight::get().reads(1);
					let now = <frame_system::Pallet<T>>::block_number();
					weight += Self::enact_pvf_accepted(now, &code_hash, &[cause], 0, cfg);
				} else {
					// PVF is not being pre-checked and it is not known. Start a new pre-checking
					// process.
					weight += T::DbWeight::get().reads_writes(3, 2);
					let now = <frame_system::Pallet<T>>::block_number();
					let n_validators = shared::Pallet::<T>::active_validator_keys().len();
					PvfActiveVoteMap::<T>::insert(
						&code_hash,
						PvfCheckActiveVoteState::new(now, n_validators, cause),
					);
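					// Keep `PvfActiveVoteList` sorted (by code hash); `groom_ongoing_pvf_votes`
					// relies on rebuilding it in the same order.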
					PvfActiveVoteList::<T>::mutate(|l| {
						if let Err(idx) = l.binary_search(&code_hash) {
							l.insert(idx, code_hash);
						}
					});
				}
			},
			Some(mut vote_state) => {
				// Coalescing: the PVF is already being pre-checked so we just need to piggy back
				// on it.
				weight += T::DbWeight::get().writes(1);
				vote_state.causes.push(cause);
				PvfActiveVoteMap::<T>::insert(&code_hash, vote_state);
			},
		}

		// We increase the code RC here in any case. Intuitively the parachain that requested this
		// action is now a user of that PVF.
		//
		// If the result of the pre-checking is rejection, then we will decrease the RC for each
		// cause, including the current one.
		//
		// If the result of the pre-checking is acceptance, then we do nothing to the RC because
		// the PVF will continue to be used by the same users.
		//
		// If the PVF was fast-tracked (i.e. there is already a non-zero RC) and there is no
		// pre-checking, we do not change the RC either.
		weight += Self::increase_code_ref(&code_hash, &code);

		weight
	}

	/// Note that a para has progressed to a new head, where the new head was executed in the context
	/// of a relay-chain block with given number. This will apply pending code upgrades based
	/// on the relay-parent block number provided.
	pub(crate) fn note_new_head(
		id: ParaId,
		new_head: HeadData,
		execution_context: T::BlockNumber,
	) -> Weight {
		Heads::<T>::insert(&id, new_head);
		if let Some(expected_at) = <Self as Store>::FutureCodeUpgrades::get(&id) {
			if expected_at <= execution_context {
				<Self as Store>::FutureCodeUpgrades::remove(&id);
				<Self as Store>::UpgradeGoAheadSignal::remove(&id);

				// Both should always be `Some` in this case, since a code upgrade is scheduled.
				let new_code_hash = if let Some(new_code_hash) = FutureCodeHash::<T>::take(&id) {
					new_code_hash
				} else {
					log::error!(target: LOG_TARGET, "Missing future code hash for {:?}", &id);
					return T::DbWeight::get().reads_writes(3, 1 + 3)
				};
				let maybe_prior_code_hash = CurrentCodeHash::<T>::get(&id);
				CurrentCodeHash::<T>::insert(&id, &new_code_hash);

				let log = ConsensusLog::ParaUpgradeCode(id, new_code_hash);
				<frame_system::Pallet<T>>::deposit_log(log.into());

				// `now` is only used for registering pruning as part of `fn note_past_code`
				let now = <frame_system::Pallet<T>>::block_number();
				let weight = if let Some(prior_code_hash) = maybe_prior_code_hash {
					Self::note_past_code(id, expected_at, now, prior_code_hash)
				} else {
					log::error!(target: LOG_TARGET, "Missing prior code hash for para {:?}", &id);

				// add 1 to writes due to heads update.
				weight + T::DbWeight::get().reads_writes(3, 1 + 3)
			} else {
				T::DbWeight::get().reads_writes(1, 1 + 0)
			}
		} else {
			// This means there is no upgrade scheduled.
			//
			// In case the upgrade was aborted by the relay-chain we should reset
			// the `Abort` signal.
			UpgradeGoAheadSignal::<T>::remove(&id);
			T::DbWeight::get().reads_writes(1, 2)
		}
	}

	/// Returns the list of PVFs (aka validation code) that require casting a vote by a validator in
	/// the active validator set.
	pub(crate) fn pvfs_require_precheck() -> Vec<ValidationCodeHash> {
		PvfActiveVoteList::<T>::get()
	}

	/// Submits a given PVF check statement with corresponding signature as an unsigned transaction
	/// into the memory pool. Ultimately, that disseminates the transaction across the network.
	///
	/// This function expects an offchain context and cannot be called from on-chain logic.
	///
	/// The signature is assumed to pertain to `stmt`.
	pub(crate) fn submit_pvf_check_statement(
		stmt: PvfCheckStatement,
		signature: ValidatorSignature,
	) {
		use frame_system::offchain::SubmitTransaction;

		if let Err(e) = SubmitTransaction::<T, Call<T>>::submit_unsigned_transaction(
			Call::include_pvf_check_statement { stmt, signature }.into(),
		) {
			log::error!(target: LOG_TARGET, "Error submitting pvf check statement: {:?}", e,);
	/// Returns the current lifecycle state of the para.
	pub fn lifecycle(id: ParaId) -> Option<ParaLifecycle> {
		ParaLifecycles::<T>::get(&id)
	}

	/// Returns whether the given ID refers to a valid para.
	///
	/// Paras that are onboarding or offboarding are not included.
	pub fn is_valid_para(id: ParaId) -> bool {
		if let Some(state) = ParaLifecycles::<T>::get(&id) {
			!state.is_onboarding() && !state.is_offboarding()
		} else {
			false
		}
	}

	/// Whether a para ID corresponds to any live parachain.
	///
	/// Includes parachains which will downgrade to a parathread in the future.
	pub fn is_parachain(id: ParaId) -> bool {
		if let Some(state) = ParaLifecycles::<T>::get(&id) {
			state.is_parachain()
		} else {
			false
		}
	}

	/// Whether a para ID corresponds to any live parathread.
	///
	/// Includes parathreads which will upgrade to parachains in the future.
	pub fn is_parathread(id: ParaId) -> bool {
		if let Some(state) = ParaLifecycles::<T>::get(&id) {
			state.is_parathread()
		} else {
			false
		}
	}

	/// If a candidate from the specified parachain were submitted at the current block, this
	/// function returns whether that candidate would pass the acceptance criteria.
	pub(crate) fn can_upgrade_validation_code(id: ParaId) -> bool {
		FutureCodeHash::<T>::get(&id).is_none() && UpgradeRestrictionSignal::<T>::get(&id).is_none()
	}

	/// Return the session index that should be used for any future scheduled changes.
	fn scheduled_session() -> SessionIndex {
		shared::Pallet::<T>::scheduled_session()
	}

	/// Store the validation code if not already stored, and increase its reference count.
	///
	/// Returns the weight consumed.
	fn increase_code_ref(code_hash: &ValidationCodeHash, code: &ValidationCode) -> Weight {
		let mut weight = T::DbWeight::get().reads_writes(1, 1);
		<Self as Store>::CodeByHashRefs::mutate(code_hash, |refs| {
			if *refs == 0 {
				weight += T::DbWeight::get().writes(1);
				<Self as Store>::CodeByHash::insert(code_hash, code);
			}
			*refs += 1;
		});
		weight
	}

	/// Decrease the number of references of the validation code, and remove it from storage
	/// if the count reaches zero.
	///
	/// Returns the weight consumed.
	fn decrease_code_ref(code_hash: &ValidationCodeHash) -> Weight {
		let mut weight = T::DbWeight::get().reads(1);
		let refs = <Self as Store>::CodeByHashRefs::get(code_hash);
		if refs == 0 {
			log::error!(target: LOG_TARGET, "Code refs is already zero for {:?}", code_hash);
			return weight
		}
		if refs == 1 {
			weight += T::DbWeight::get().writes(2);
			<Self as Store>::CodeByHash::remove(code_hash);
			<Self as Store>::CodeByHashRefs::remove(code_hash);
		} else {
			weight += T::DbWeight::get().writes(1);
			<Self as Store>::CodeByHashRefs::insert(code_hash, refs - 1);
		}
		weight
	}

	/// Test function for triggering a new session in this pallet.
	#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
	pub fn test_on_new_session() {
		Self::initializer_on_new_session(&SessionChangeNotification {
			session_index: shared::Pallet::<T>::session_index(),
			..Default::default()
		});
	}

	#[cfg(any(feature = "runtime-benchmarks", test))]
	pub fn heads_insert(para_id: &ParaId, head_data: HeadData) {
		Heads::<T>::insert(para_id, head_data);
	}
}

#[cfg(test)]
mod tests {
	use super::*;
	use frame_support::{assert_err, assert_ok, assert_storage_noop};
	use keyring::Sr25519Keyring;
	use primitives::{
		v0::PARACHAIN_KEY_TYPE_ID,
		v1::{BlockNumber, ValidatorId},
	};
	use sc_keystore::LocalKeystore;
	use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
	use std::sync::Arc;
	use test_helpers::{dummy_head_data, dummy_validation_code};

	use crate::{
		configuration::HostConfiguration,
		mock::{
			new_test_ext, Configuration, MockGenesisConfig, Origin, Paras, ParasShared, System,
			Test,
		},
	};

	static VALIDATORS: &[Sr25519Keyring] = &[
		Sr25519Keyring::Alice,
		Sr25519Keyring::Bob,
		Sr25519Keyring::Charlie,
		Sr25519Keyring::Dave,
		Sr25519Keyring::Ferdie,
	];

	fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec<ValidatorId> {
		val_ids.iter().map(|v| v.public().into()).collect()
	}

	fn sign_and_include_pvf_check_statement(stmt: PvfCheckStatement) {
		// Reuse the shared `VALIDATORS` set rather than repeating the keyring list.
		let signature = VALIDATORS[stmt.validator_index.0 as usize].sign(&stmt.signing_payload());
		Paras::include_pvf_check_statement(None.into(), stmt, signature.into()).unwrap();
	}

	fn run_to_block(to: BlockNumber, new_session: Option<Vec<BlockNumber>>) {
		let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
		for validator in VALIDATORS.iter() {
			SyncCryptoStore::sr25519_generate_new(
				&*keystore,
				PARACHAIN_KEY_TYPE_ID,
				Some(&validator.to_seed()),
			)
			.unwrap();
		}
		let validator_pubkeys = validator_pubkeys(VALIDATORS);

		while System::block_number() < to {
			let b = System::block_number();
			Paras::initializer_finalize();
			ParasShared::initializer_finalize();
			if new_session.as_ref().map_or(false, |v| v.contains(&(b + 1))) {
				let mut session_change_notification = SessionChangeNotification::default();
				session_change_notification.session_index = ParasShared::session_index() + 1;
				session_change_notification.validators = validator_pubkeys.clone();
				ParasShared::initializer_on_new_session(
					session_change_notification.session_index,
					session_change_notification.random_seed,
					&session_change_notification.new_config,
					session_change_notification.validators.clone(),
				);
				ParasShared::set_active_validators_ascending(validator_pubkeys.clone());