parachains.insert(i, para);
}
ParaLifecycles::<T>::insert(&para, ParaLifecycle::Parachain);
},
// Downgrade a parachain to a parathread
Some(ParaLifecycle::DowngradingParachain) => {
if let Ok(i) = parachains.binary_search(&para) {
parachains.remove(i);
}
ParaLifecycles::<T>::insert(&para, ParaLifecycle::Parathread);
},
// Offboard a parathread or parachain from the system
Some(ParaLifecycle::OffboardingParachain) |
Some(ParaLifecycle::OffboardingParathread) => {
if let Ok(i) = parachains.binary_search(&para) {
parachains.remove(i);
}
<Self as Store>::Heads::remove(&para);
<Self as Store>::FutureCodeUpgrades::remove(&para);
<Self as Store>::UpgradeGoAheadSignal::remove(&para);
<Self as Store>::UpgradeRestrictionSignal::remove(&para);
let removed_future_code_hash = <Self as Store>::FutureCodeHash::take(&para);
if let Some(removed_future_code_hash) = removed_future_code_hash {
Self::decrease_code_ref(&removed_future_code_hash);
}
let removed_code_hash = <Self as Store>::CurrentCodeHash::take(&para);
if let Some(removed_code_hash) = removed_code_hash {
Self::note_past_code(para, now, now, removed_code_hash);
}
outgoing.push(para);
},
}
}
if !outgoing.is_empty() {
// Filter offboarded parachains from the upcoming upgrades and upgrade cooldowns list.
//
// We do it after the offboarding to get away with only a single read/write per list.
//
// NOTE: both of these iterate over the list and over `outgoing`. We do not expect
// either of these to be large, so this should be fine.
<Self as Store>::UpcomingUpgrades::mutate(|upcoming_upgrades| {
*upcoming_upgrades = mem::take(upcoming_upgrades)
.into_iter()
.filter(|&(ref para, _)| !outgoing.contains(para))
.collect();
});
<Self as Store>::UpgradeCooldowns::mutate(|upgrade_cooldowns| {
*upgrade_cooldowns = mem::take(upgrade_cooldowns)
.into_iter()
.filter(|&(ref para, _)| !outgoing.contains(para))
.collect();
});
}
// Place the new parachains set in storage.
<Self as Store>::Parachains::set(parachains);
return outgoing
}
// Note a replacement of the code of para with the given `id`, which occurred in the
// context of the given relay-chain block number. Provide the replaced code.
//
// `at` for para-triggered replacement is the block number of the relay-chain
// block in whose context the parablock was executed
// (i.e. number of `relay_parent` in the receipt)
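//
// A worked example with assumed numbers: a parablock built on relay-parent #100
// whose code-replacing candidate is enacted at relay block #103 yields
// `at = 100` and `now = 103`.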
fn note_past_code(
id: ParaId,
at: T::BlockNumber,
now: T::BlockNumber,
old_code_hash: ValidationCodeHash,
) -> Weight {
<Self as Store>::PastCodeMeta::mutate(&id, |past_meta| {
past_meta.note_replacement(at, now);
});
<Self as Store>::PastCodeHash::insert(&(id, at), old_code_hash);
// Schedule pruning for this past-code to be removed as soon as it
// exits the slashing window.
<Self as Store>::PastCodePruning::mutate(|pruning| {
let insert_idx = pruning.binary_search_by_key(&now, |&(_, b)| b).unwrap_or_else(|idx| idx);
pruning.insert(insert_idx, (id, now));
});
T::DbWeight::get().reads_writes(2, 3)
}
// looks at old code metadata, compares them to the current acceptance window, and prunes those
// that are too old.
fn prune_old_code(now: T::BlockNumber) -> Weight {
let config = configuration::Pallet::<T>::config();
let code_retention_period = config.code_retention_period;
if now <= code_retention_period {
let weight = T::DbWeight::get().reads_writes(1, 0);
return weight
}
// The height of any changes we no longer should keep around.
let pruning_height = now - (code_retention_period + One::one());
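// Worked example with assumed numbers: with `code_retention_period = 28_800` and
// `now = 30_000`, `pruning_height = 30_000 - 28_801 = 1_199`, so every
// replacement activated at or before block 1_199 is now prunable.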
let pruning_tasks_done = <Self as Store>::PastCodePruning::mutate(
|pruning_tasks: &mut Vec<(_, T::BlockNumber)>| {
let (pruning_tasks_done, pruning_tasks_to_do) = {
// find all past code that has just exited the pruning window.
let up_to_idx =
pruning_tasks.iter().take_while(|&(_, at)| at <= &pruning_height).count();
(up_to_idx, pruning_tasks.drain(..up_to_idx))
};
for (para_id, _) in pruning_tasks_to_do {
let full_deactivate = <Self as Store>::PastCodeMeta::mutate(&para_id, |meta| {
for pruned_repl_at in meta.prune_up_to(pruning_height) {
let removed_code_hash =
<Self as Store>::PastCodeHash::take(&(para_id, pruned_repl_at));
if let Some(removed_code_hash) = removed_code_hash {
Self::decrease_code_ref(&removed_code_hash);
} else {
log::warn!(
"Missing code for removed hash {:?}",
removed_code_hash,
);
}
}
meta.is_empty() && Self::para_head(&para_id).is_none()
});
// This parachain has been removed and now the vestigial code
// has been removed from the state. clean up meta as well.
if full_deactivate {
<Self as Store>::PastCodeMeta::remove(&para_id);
}
}
pruning_tasks_done as u64
},
);
// 1 read for the meta for each pruning task, 1 read for the config
// 2 writes: updating the meta and pruning the code
T::DbWeight::get().reads_writes(1 + pruning_tasks_done, 2 * pruning_tasks_done)
}
/// Process the timers related to upgrades. Specifically, the upgrade go ahead signals toggle
/// and the upgrade cooldown restrictions.
///
/// Takes the current block number and returns the weight consumed.
fn process_scheduled_upgrade_changes(now: T::BlockNumber) -> Weight {
let upgrades_signaled = <Self as Store>::UpcomingUpgrades::mutate(
|upcoming_upgrades: &mut Vec<(ParaId, T::BlockNumber)>| {
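// `UpcomingUpgrades` is kept sorted ascending by block number (entries are
// inserted via `binary_search_by_key`, see `proceed_with_upgrade` below), so
// taking the prefix with `take_while` yields exactly the signals that are now due.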
let num = upcoming_upgrades.iter().take_while(|&(_, at)| at <= &now).count();
for (para, _) in upcoming_upgrades.drain(..num) {
<Self as Store>::UpgradeGoAheadSignal::insert(&para, UpgradeGoAhead::GoAhead);
}
num
},
);
let cooldowns_expired = <Self as Store>::UpgradeCooldowns::mutate(
|upgrade_cooldowns: &mut Vec<(ParaId, T::BlockNumber)>| {
let num = upgrade_cooldowns.iter().take_while(|&(_, at)| at <= &now).count();
for (para, _) in upgrade_cooldowns.drain(..num) {
<Self as Store>::UpgradeRestrictionSignal::remove(&para);
}
num
},
);
T::DbWeight::get().reads_writes(2, upgrades_signaled as u64 + cooldowns_expired as u64)
}
/// Goes over all PVF votes in progress, reinitializes ballots, increments ages and prunes the
/// active votes that reached their time-to-live.
fn groom_ongoing_pvf_votes(
cfg: &configuration::HostConfiguration<T::BlockNumber>,
new_n_validators: usize,
) -> Weight {
let mut weight = T::DbWeight::get().reads(1);
let potentially_active_votes = PvfActiveVoteList::<T>::get();
// Initially empty list which contains all the PVF active votes that made it through this
// session change.
//
// **Ordered** in the same way as `PvfActiveVoteList`.
let mut actually_active_votes = Vec::with_capacity(potentially_active_votes.len());
for vote_subject in potentially_active_votes {
let mut vote_state = match PvfActiveVoteMap::<T>::take(&vote_subject) {
Some(v) => v,
None => {
// This branch should never be reached. This is due to the fact that the set of
// `PvfActiveVoteMap`'s keys is always equal to the set of items found in
// `PvfActiveVoteList`.
log::warn!(
"The PvfActiveVoteMap is out of sync with PvfActiveVoteList!",
);
debug_assert!(false);
continue
},
};
vote_state.age += 1;
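// Worked example (assuming `pvf_voting_ttl = 2`): a vote created during session
// N has age 0; at the change into N+1 it becomes 1 and survives (1 < 2), while
// at the change into N+2 it becomes 2 and is rejected below.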
if vote_state.age < cfg.pvf_voting_ttl {
weight += T::DbWeight::get().writes(1);
vote_state.reinitialize_ballots(new_n_validators);
PvfActiveVoteMap::<T>::insert(&vote_subject, vote_state);
// push maintaining the original order.
actually_active_votes.push(vote_subject);
} else {
// TTL is reached. Reject.
weight += Self::enact_pvf_rejected(&vote_subject, vote_state.causes);
}
}
weight += T::DbWeight::get().writes(1);
PvfActiveVoteList::<T>::put(actually_active_votes);
weight
}
fn enact_pvf_accepted(
now: T::BlockNumber,
code_hash: &ValidationCodeHash,
causes: &[PvfCheckCause<T::BlockNumber>],
sessions_observed: SessionIndex,
cfg: &configuration::HostConfiguration<T::BlockNumber>,
) -> Weight {
let mut weight = 0;
for cause in causes {
match cause {
PvfCheckCause::Onboarding(id) => {
weight += Self::proceed_with_onboarding(*id, sessions_observed);
},
PvfCheckCause::Upgrade { id, relay_parent_number } => {
weight +=
Self::proceed_with_upgrade(*id, code_hash, now, *relay_parent_number, cfg);
},
}
}
weight
}
fn proceed_with_onboarding(id: ParaId, sessions_observed: SessionIndex) -> Weight {
let weight = T::DbWeight::get().reads_writes(2, 1);
// we should onboard only after `SESSION_DELAY` sessions but we should take
// into account the number of sessions the PVF pre-checking occupied.
//
// we cannot onboard at the current session, so it must be at least one
// session ahead.
let onboard_at: SessionIndex = shared::Pallet::<T>::session_index() +
cmp::max(shared::SESSION_DELAY.saturating_sub(sessions_observed), 1);
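// Worked example (assuming `SESSION_DELAY = 2`): a fast-tracked PVF with
// `sessions_observed = 0` onboards max(2 - 0, 1) = 2 sessions ahead, while a
// PVF whose pre-checking took two sessions onboards max(2 - 2, 1) = 1 ahead.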
ActionsQueue::<T>::mutate(onboard_at, |v| {
if let Err(i) = v.binary_search(&id) {
v.insert(i, id);
}
});
weight
}
fn proceed_with_upgrade(
id: ParaId,
code_hash: &ValidationCodeHash,
now: T::BlockNumber,
relay_parent_number: T::BlockNumber,
cfg: &configuration::HostConfiguration<T::BlockNumber>,
) -> Weight {
let mut weight = 0;
// Compute the relay-chain block number starting at which the code upgrade is ready to be
// applied.
//
// The first parablock with a relay-parent at or above the height of `expected_at`
// will trigger the code upgrade. The parablock that comes after that will be validated
// against the new validation code.
//
// Here we are trying to choose the block number that will be `validation_upgrade_delay`
// blocks after the relay-parent of the block that scheduled the code upgrade, but no less
// than `minimum_validation_upgrade_delay`. We want this delay out of caution, so that when
// the last pre-checking vote arrives the parachain still has some time before the upgrade
// finally takes place.
let expected_at = cmp::max(
relay_parent_number + cfg.validation_upgrade_delay,
now + cfg.minimum_validation_upgrade_delay,
);
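// Worked example with assumed numbers: `relay_parent_number = 100`,
// `validation_upgrade_delay = 10`, `now = 105` and
// `minimum_validation_upgrade_delay = 20` give
// `expected_at = max(110, 125) = 125`.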
weight += T::DbWeight::get().reads_writes(1, 4);
FutureCodeUpgrades::<T>::insert(&id, expected_at);
<Self as Store>::UpcomingUpgrades::mutate(|upcoming_upgrades| {
let insert_idx = upcoming_upgrades
.binary_search_by_key(&expected_at, |&(_, b)| b)
.unwrap_or_else(|idx| idx);
upcoming_upgrades.insert(insert_idx, (id, expected_at));
});
let expected_at = expected_at.saturated_into();
let log = ConsensusLog::ParaScheduleUpgradeCode(id, *code_hash, expected_at);
<frame_system::Pallet<T>>::deposit_log(log.into());
weight
}
fn enact_pvf_rejected(
code_hash: &ValidationCodeHash,
causes: Vec<PvfCheckCause<T::BlockNumber>>,
) -> Weight {
let mut weight = T::DbWeight::get().writes(1);
for cause in causes {
// Whenever PVF pre-checking is started or a new cause is added to it, the RC is bumped.
// Now we need to unbump it.
weight += Self::decrease_code_ref(code_hash);
match cause {
PvfCheckCause::Onboarding(id) => {
// Here we need to undo everything that was done during `schedule_para_initialize`.
// Essentially, the logic is similar to offboarding, with the exception that before
// the actual onboarding the parachain did not have a chance to reach any upgrades.
// Therefore we can skip all the upgrade-related storage items here.
weight += T::DbWeight::get().writes(3);
UpcomingParasGenesis::<T>::remove(&id);
CurrentCodeHash::<T>::remove(&id);
ParaLifecycles::<T>::remove(&id);
},
PvfCheckCause::Upgrade { id, .. } => {
weight += T::DbWeight::get().writes(2);
UpgradeGoAheadSignal::<T>::insert(&id, UpgradeGoAhead::Abort);
FutureCodeHash::<T>::remove(&id);
},
}
}
weight
}
/// Verify that `schedule_para_initialize` can be called successfully.
///
/// Returns false if para is already registered in the system.
pub fn can_schedule_para_initialize(id: &ParaId) -> bool {
ParaLifecycles::<T>::get(id).is_none()
}
/// Schedule a para to be initialized. If the validation code is not already stored in the
/// code storage, then a PVF pre-checking process will be initiated.
/// Only after the PVF pre-checking succeeds can the para be onboarded. Note, that calling this
/// does not guarantee that the parachain will eventually be onboarded. This can happen in case
/// the PVF does not pass PVF pre-checking.
///
/// The Para ID should not be active in this module. The validation code supplied in
/// `genesis_data` should not be empty. If those conditions are not met, then the para cannot
/// be onboarded.
pub(crate) fn schedule_para_initialize(
id: ParaId,
mut genesis_data: ParaGenesisArgs,
) -> DispatchResult {
// Make sure parachain isn't already in our system and that the onboarding parameters are
// valid.
ensure!(Self::can_schedule_para_initialize(&id), Error::<T>::CannotOnboard);
ensure!(!genesis_data.validation_code.0.is_empty(), Error::<T>::CannotOnboard);
ParaLifecycles::<T>::insert(&id, ParaLifecycle::Onboarding);
// HACK: here we are doing something nasty.
//
// In order to fix the [soaking issue] we insert the code eagerly here. When the onboarding
// is finally enacted, we do not need to insert the code anymore. Therefore, there is no
// reason for the validation code to be copied into the `ParaGenesisArgs`. We also do not
// want to copy the validation code needlessly, to avoid adding more memory pressure.
//
// That said, we also want to preserve `ParaGenesisArgs` as it is, for now. There are two
// reasons:
//
// - Doing it within the context of the PR that introduces this change is undesirable, since
// it is already a big change, and that change would require a migration. Moreover, if we
// run the new version of the runtime, there will be less things to worry about during
// the eventual proper migration.
//
// - This data type already is used for generating genesis, and changing it will probably
// introduce some unnecessary burden.
//
// So instead of going through it right now, we will do something sneaky. Specifically:
//
// - Insert the `CurrentCodeHash` now, instead of during the onboarding. That allows us to
// get rid of hashing the validation code when onboarding.
//
// - Replace `validation_code` with a sentinel value: an empty vector. This should be fine
// as long as we do not allow registering parachains with empty code. At the moment of writing
// this should already be the case.
//
// - An empty value is treated as a sign that the current code was already inserted during the onboarding.
//
// This is only an intermediate solution and should be fixed in the foreseeable future.
//
// [soaking issue]: https://github.com/paritytech/polkadot/issues/3918
let validation_code =
mem::replace(&mut genesis_data.validation_code, ValidationCode(Vec::new()));
UpcomingParasGenesis::<T>::insert(&id, genesis_data);
let validation_code_hash = validation_code.hash();
<Self as Store>::CurrentCodeHash::insert(&id, validation_code_hash);
let cfg = configuration::Pallet::<T>::config();
Self::kick_off_pvf_check(
PvfCheckCause::Onboarding(id),
validation_code_hash,
validation_code,
&cfg,
);
Ok(())
}
/// Schedule a para to be cleaned up at the start of the next session.
/// Will return an error if either of the following is true:
///
/// - para is not a stable parachain or parathread (i.e. [`ParaLifecycle::is_stable`] is `false`)
/// - para has a pending upgrade.
///
/// No-op if para is not registered at all.
pub(crate) fn schedule_para_cleanup(id: ParaId) -> DispatchResult {
// Disallow offboarding in case there is an upcoming upgrade.
//
// This is not a fundamental limitation but rather a simplification: it allows us to get
// away without introducing additional logic for pruning and, more importantly, enacting
// ongoing PVF pre-checking votes. It also removes some nasty edge cases.
//
// This implicitly assumes that the given para exists, i.e. its lifecycle is not `None`.
if FutureCodeHash::<T>::contains_key(&id) {
return Err(Error::<T>::CannotOffboard.into())
}
let lifecycle = ParaLifecycles::<T>::get(&id);
match lifecycle {
// If para is not registered, nothing to do!
None => return Ok(()),
Some(ParaLifecycle::Parathread) => {
ParaLifecycles::<T>::insert(&id, ParaLifecycle::OffboardingParathread);
},
Some(ParaLifecycle::Parachain) => {
ParaLifecycles::<T>::insert(&id, ParaLifecycle::OffboardingParachain);
},
_ => return Err(Error::<T>::CannotOffboard.into()),
}
let scheduled_session = Self::scheduled_session();
ActionsQueue::<T>::mutate(scheduled_session, |v| {
if let Err(i) = v.binary_search(&id) {
v.insert(i, id);
}
});
Ok(())
}
/// Schedule a parathread to be upgraded to a parachain.
///
/// Will return error if `ParaLifecycle` is not `Parathread`.
pub(crate) fn schedule_parathread_upgrade(id: ParaId) -> DispatchResult {
let scheduled_session = Self::scheduled_session();
let lifecycle = ParaLifecycles::<T>::get(&id).ok_or(Error::<T>::NotRegistered)?;
ensure!(lifecycle == ParaLifecycle::Parathread, Error::<T>::CannotUpgrade);
ParaLifecycles::<T>::insert(&id, ParaLifecycle::UpgradingParathread);
ActionsQueue::<T>::mutate(scheduled_session, |v| {
if let Err(i) = v.binary_search(&id) {
v.insert(i, id);
}
/// Schedule a parachain to be downgraded to a parathread.
///
/// Will return an error if `ParaLifecycle` is not `Parachain`.
pub(crate) fn schedule_parachain_downgrade(id: ParaId) -> DispatchResult {
let scheduled_session = Self::scheduled_session();
let lifecycle = ParaLifecycles::<T>::get(&id).ok_or(Error::<T>::NotRegistered)?;
ensure!(lifecycle == ParaLifecycle::Parachain, Error::<T>::CannotDowngrade);
ParaLifecycles::<T>::insert(&id, ParaLifecycle::DowngradingParachain);
ActionsQueue::<T>::mutate(scheduled_session, |v| {
if let Err(i) = v.binary_search(&id) {
v.insert(i, id);
}
});
Ok(())
}
/// Schedule a future code upgrade of the given parachain.
///
/// If the new code is not known, then the PVF pre-checking will be started for that validation
/// code. In case the validation code does not pass the PVF pre-checking process, the
/// upgrade will be aborted.
///
/// Only after the code is approved by the process, the upgrade can be scheduled. Specifically,
/// the relay-chain block number will be determined at which the upgrade will take place. We
/// call that block `expected_at`.
///
/// Once the candidate with the relay-parent >= `expected_at` is enacted, the new validation code
/// will be applied. Therefore, the new code will be used to validate the next candidate.
/// The new code should not be equal to the current one, otherwise the upgrade will be aborted.
/// If there is already a scheduled code upgrade for the para, this is a no-op.
pub(crate) fn schedule_code_upgrade(
id: ParaId,
new_code: ValidationCode,
relay_parent_number: T::BlockNumber,
cfg: &configuration::HostConfiguration<T::BlockNumber>,
) -> Weight {
let mut weight = T::DbWeight::get().reads(1);
// Enacting this should be prevented by the `can_schedule_upgrade`
if FutureCodeHash::<T>::contains_key(&id) {
// This branch should never be reached. Signalling an upgrade is disallowed for a para
// that already has one upgrade scheduled.
//
// Any candidate that attempts to do that should be rejected by
// `can_upgrade_validation_code`.
//
// NOTE: we cannot set `UpgradeGoAheadSignal` signal here since this will be reset by
// the following call `note_new_head`
log::warn!(target: LOG_TARGET, "ended up scheduling an upgrade while one is pending",);
return weight
}
let code_hash = new_code.hash();
// para signals an update to the same code? This does not make a lot of sense, so abort the
// process right away.
//
// We do not want to allow this since it will mess with the code reference counting.
weight += T::DbWeight::get().reads(1);
if CurrentCodeHash::<T>::get(&id) == Some(code_hash) {
// NOTE: we cannot set `UpgradeGoAheadSignal` signal here since this will be reset by
// the following call `note_new_head`
log::warn!(
target: LOG_TARGET,
"para tried to upgrade to the same code. Abort the upgrade",
);
return weight
}
// This is the start of the upgrade process. Prevent any further attempts at upgrading.
weight += T::DbWeight::get().writes(2);
FutureCodeHash::<T>::insert(&id, &code_hash);
UpgradeRestrictionSignal::<T>::insert(&id, UpgradeRestriction::Present);
weight += T::DbWeight::get().reads_writes(1, 1);
let next_possible_upgrade_at = relay_parent_number + cfg.validation_upgrade_frequency;
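// Worked example with assumed numbers: a relay-parent at block 1_000 and
// `validation_upgrade_frequency = 100` forbid signalling another upgrade
// before relay block 1_100.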
<Self as Store>::UpgradeCooldowns::mutate(|upgrade_cooldowns| {
let insert_idx = upgrade_cooldowns
.binary_search_by_key(&next_possible_upgrade_at, |&(_, b)| b)
.unwrap_or_else(|idx| idx);
upgrade_cooldowns.insert(insert_idx, (id, next_possible_upgrade_at));
});
weight += Self::kick_off_pvf_check(
PvfCheckCause::Upgrade { id, relay_parent_number },
code_hash,
new_code,
cfg,
);
weight
}
/// Makes sure that the given code hash has passed pre-checking.
///
/// If the given code hash has already passed pre-checking, then the approval happens
/// immediately. Similarly, if the pre-checking is turned off, the update is scheduled immediately
/// as well. In this case, the behavior is similar to the previous, i.e. the upgrade sequence
/// is purely time-based.
///
/// If the code is unknown, but the pre-checking for that PVF is already running then we perform
/// "coalescing". We save the cause for this PVF pre-check request and just add it to the
/// existing active PVF vote.
///
/// And finally, if the code is unknown and pre-checking is not running, we start the
/// pre-checking process anew.
///
/// Unconditionally increases the reference count for the passed `code`.
fn kick_off_pvf_check(
cause: PvfCheckCause<T::BlockNumber>,
code_hash: ValidationCodeHash,
code: ValidationCode,
cfg: &configuration::HostConfiguration<T::BlockNumber>,
) -> Weight {
let mut weight = 0;
weight += T::DbWeight::get().reads(1);
match PvfActiveVoteMap::<T>::get(&code_hash) {
None => {
let known_code = CodeByHash::<T>::contains_key(&code_hash);
weight += T::DbWeight::get().reads(1);
if !cfg.pvf_checking_enabled || known_code {
// Either:
// - the code is known and there is no active PVF vote for it meaning it is
// already checked, or
// - the PVF checking is disabled
// In any case: fast track the PVF checking into the accepted state
weight += T::DbWeight::get().reads(1);
let now = <frame_system::Pallet<T>>::block_number();
weight += Self::enact_pvf_accepted(now, &code_hash, &[cause], 0, cfg);
} else {
// PVF is not being pre-checked and it is not known. Start a new pre-checking
// process.
weight += T::DbWeight::get().reads_writes(3, 2);
let now = <frame_system::Pallet<T>>::block_number();
let n_validators = shared::Pallet::<T>::active_validator_keys().len();
PvfActiveVoteMap::<T>::insert(
&code_hash,
PvfCheckActiveVoteState::new(now, n_validators, cause),
);
PvfActiveVoteList::<T>::mutate(|l| {
if let Err(idx) = l.binary_search(&code_hash) {
l.insert(idx, code_hash);
}
});
}
},
Some(mut vote_state) => {
// Coalescing: the PVF is already being pre-checked so we just need to piggy back
// on it.
weight += T::DbWeight::get().writes(1);
vote_state.causes.push(cause);
PvfActiveVoteMap::<T>::insert(&code_hash, vote_state);
},
}
// We increase the code RC here in any case. Intuitively the parachain that requested this
// action is now a user of that PVF.
//
// If the result of the pre-checking is reject, then we would decrease the RC for each cause,
// including the current.
//
// If the result of the pre-checking is accept, then we do nothing to the RC because the PVF
// will continue to be used by the same users.
//
// If the PVF was fast-tracked (i.e. there is already a non-zero RC) and there is no
// pre-checking, we also do not change the RC then.
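//
// Worked example: two paras coalescing on the same unknown code bump the RC
// 0 -> 1 -> 2 while the vote runs; a rejection then decreases it once per
// cause back to 0, while an acceptance leaves it at 2.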
weight += Self::increase_code_ref(&code_hash, &code);
weight
}
/// Note that a para has progressed to a new head, where the new head was executed in the context
/// of a relay-chain block with given number. This will apply pending code upgrades based
/// on the relay-parent block number provided.
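///
/// A worked example with assumed numbers: suppose `FutureCodeUpgrades[id] = 110`.
/// A new head executed in the context of relay block #109 keeps the old code,
/// while the first head executed at #110 or later applies the upgrade, notes the
/// old code as past code and emits a `ParaUpgradeCode` digest log.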
pub(crate) fn note_new_head(
id: ParaId,
new_head: HeadData,
execution_context: T::BlockNumber,
) -> Weight {
if let Some(expected_at) = <Self as Store>::FutureCodeUpgrades::get(&id) {
if expected_at <= execution_context {
<Self as Store>::FutureCodeUpgrades::remove(&id);
<Self as Store>::UpgradeGoAheadSignal::remove(&id);
// Both should always be `Some` in this case, since a code upgrade is scheduled.
let new_code_hash = if let Some(new_code_hash) = FutureCodeHash::<T>::take(&id) {
new_code_hash
} else {
log::error!(target: LOG_TARGET, "Missing future code hash for {:?}", &id);
return T::DbWeight::get().reads_writes(3, 1 + 3)
};
let maybe_prior_code_hash = CurrentCodeHash::<T>::get(&id);
CurrentCodeHash::<T>::insert(&id, &new_code_hash);
let log = ConsensusLog::ParaUpgradeCode(id, new_code_hash);
<frame_system::Pallet<T>>::deposit_log(log.into());
// `now` is only used for registering pruning as part of `fn note_past_code`
let now = <frame_system::Pallet<T>>::block_number();
let weight = if let Some(prior_code_hash) = maybe_prior_code_hash {
Self::note_past_code(id, expected_at, now, prior_code_hash)
} else {
log::error!(target: LOG_TARGET, "Missing prior code hash for para {:?}", &id);
0 as Weight
};
// add 1 to writes due to heads update.
weight + T::DbWeight::get().reads_writes(3, 1 + 3)
} else {
T::DbWeight::get().reads_writes(1, 1 + 0)
}
} else {
// This means there is no upgrade scheduled.
//
// In case the upgrade was aborted by the relay-chain we should reset
// the `Abort` signal.
UpgradeGoAheadSignal::<T>::remove(&id);
T::DbWeight::get().reads_writes(1, 2)
}
}
/// Returns the list of PVFs (aka validation code) that require casting a vote by a validator in
/// the active validator set.
pub(crate) fn pvfs_require_precheck() -> Vec<ValidationCodeHash> {
PvfActiveVoteList::<T>::get()
}
/// Submits a given PVF check statement with corresponding signature as an unsigned transaction
/// into the memory pool. Ultimately, that disseminates the transaction across the network.
///
/// This function expects an offchain context and cannot be callable from the on-chain logic.
///
/// The signature is assumed to pertain to `stmt`.
pub(crate) fn submit_pvf_check_statement(
stmt: PvfCheckStatement,
signature: ValidatorSignature,
) {
use frame_system::offchain::SubmitTransaction;
if let Err(e) = SubmitTransaction::<T, Call<T>>::submit_unsigned_transaction(
Call::include_pvf_check_statement { stmt, signature }.into(),
) {
log::error!(target: LOG_TARGET, "Error submitting pvf check statement: {:?}", e,);
/// Returns the current lifecycle state of the para.
pub fn lifecycle(id: ParaId) -> Option<ParaLifecycle> {
ParaLifecycles::<T>::get(&id)
}
/// Returns whether the given ID refers to a valid para.
///
/// Paras that are onboarding or offboarding are not included.
pub fn is_valid_para(id: ParaId) -> bool {
if let Some(state) = ParaLifecycles::<T>::get(&id) {
!state.is_onboarding() && !state.is_offboarding()
} else {
false
}
}
/// Whether a para ID corresponds to any live parachain.
///
/// Includes parachains which will downgrade to a parathread in the future.
pub fn is_parachain(id: ParaId) -> bool {
if let Some(state) = ParaLifecycles::<T>::get(&id) {
state.is_parachain()
} else {
false
}
}
/// Whether a para ID corresponds to any live parathread.
///
/// Includes parathreads which will upgrade to parachains in the future.
pub fn is_parathread(id: ParaId) -> bool {
if let Some(state) = ParaLifecycles::<T>::get(&id) {
state.is_parathread()
} else {
false
}
}
/// If a candidate from the specified parachain were submitted at the current block, this
/// function returns whether that candidate would pass the acceptance criteria.
pub(crate) fn can_upgrade_validation_code(id: ParaId) -> bool {
FutureCodeHash::<T>::get(&id).is_none() && UpgradeRestrictionSignal::<T>::get(&id).is_none()
}
/// Return the session index that should be used for any future scheduled changes.
fn scheduled_session() -> SessionIndex {
shared::Pallet::<T>::scheduled_session()
}
/// Store the validation code if it is not already stored, and increase its reference count.
///
/// Returns the weight consumed.
fn increase_code_ref(code_hash: &ValidationCodeHash, code: &ValidationCode) -> Weight {
let mut weight = T::DbWeight::get().reads_writes(1, 1);
<Self as Store>::CodeByHashRefs::mutate(code_hash, |refs| {
if *refs == 0 {
weight += T::DbWeight::get().writes(1);
<Self as Store>::CodeByHash::insert(code_hash, code);
}
*refs += 1;
});
weight
}
/// Decrease the reference count of the validation code and remove it from storage if it reaches zero.
///
/// Returns the weight consumed.
fn decrease_code_ref(code_hash: &ValidationCodeHash) -> Weight {
let mut weight = T::DbWeight::get().reads(1);
let refs = <Self as Store>::CodeByHashRefs::get(code_hash);
debug_assert!(refs != 0);
if refs <= 1 {
weight += T::DbWeight::get().writes(2);
<Self as Store>::CodeByHash::remove(code_hash);
<Self as Store>::CodeByHashRefs::remove(code_hash);
} else {
weight += T::DbWeight::get().writes(1);
<Self as Store>::CodeByHashRefs::insert(code_hash, refs - 1);
}
weight
}
/// Test function for triggering a new session in this pallet.
#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
pub fn test_on_new_session() {
Self::initializer_on_new_session(&SessionChangeNotification {
session_index: shared::Pallet::<T>::session_index(),
..Default::default()
});
}
#[cfg(any(feature = "runtime-benchmarks", test))]
pub fn heads_insert(para_id: &ParaId, head_data: HeadData) {
Heads::<T>::insert(para_id, head_data);
}
}
#[cfg(test)]
mod tests {
use super::*;
use frame_support::{assert_err, assert_ok};
use keyring::Sr25519Keyring;
use primitives::{
v0::PARACHAIN_KEY_TYPE_ID,
v1::{BlockNumber, ValidatorId},
};
use sc_keystore::LocalKeystore;
use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
use std::sync::Arc;
use test_helpers::{dummy_head_data, dummy_validation_code};
use crate::{
configuration::HostConfiguration,
mock::{new_test_ext, Configuration, MockGenesisConfig, Paras, ParasShared, System, Test},
};
static VALIDATORS: &[Sr25519Keyring] = &[
Sr25519Keyring::Alice,
Sr25519Keyring::Bob,
Sr25519Keyring::Charlie,
Sr25519Keyring::Dave,
Sr25519Keyring::Ferdie,
];
fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec<ValidatorId> {
val_ids.iter().map(|v| v.public().into()).collect()
}
fn sign_and_include_pvf_check_statement(stmt: PvfCheckStatement) {
let validators = &[
Sr25519Keyring::Alice,
Sr25519Keyring::Bob,
Sr25519Keyring::Charlie,
Sr25519Keyring::Dave,
Sr25519Keyring::Ferdie,
];
let signature = validators[stmt.validator_index.0 as usize].sign(&stmt.signing_payload());
Paras::include_pvf_check_statement(None.into(), stmt, signature.into()).unwrap();
}
fn run_to_block(to: BlockNumber, new_session: Option<Vec<BlockNumber>>) {
let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
for validator in VALIDATORS.iter() {
SyncCryptoStore::sr25519_generate_new(
&*keystore,
PARACHAIN_KEY_TYPE_ID,
Some(&validator.to_seed()),
)
.unwrap();
}
let validator_pubkeys = validator_pubkeys(VALIDATORS);
while System::block_number() < to {
let b = System::block_number();
Paras::initializer_finalize();
ParasShared::initializer_finalize();
if new_session.as_ref().map_or(false, |v| v.contains(&(b + 1))) {
let mut session_change_notification = SessionChangeNotification::default();
session_change_notification.session_index = ParasShared::session_index() + 1;
session_change_notification.validators = validator_pubkeys.clone();
ParasShared::initializer_on_new_session(
session_change_notification.session_index,
session_change_notification.random_seed,
&session_change_notification.new_config,
session_change_notification.validators.clone(),
);
ParasShared::set_active_validators_ascending(validator_pubkeys.clone());
Paras::initializer_on_new_session(&session_change_notification);
}
System::on_finalize(b);
System::on_initialize(b + 1);
System::set_block_number(b + 1);
ParasShared::initializer_initialize(b + 1);
Paras::initializer_initialize(b + 1);
}
}
fn upgrade_at(
expected_at: BlockNumber,
activated_at: BlockNumber,
) -> ReplacementTimes<BlockNumber> {
ReplacementTimes { expected_at, activated_at }
}
fn check_code_is_stored(validation_code: &ValidationCode) {
assert!(<Paras as Store>::CodeByHashRefs::get(validation_code.hash()) != 0);
assert!(<Paras as Store>::CodeByHash::contains_key(validation_code.hash()));
}
fn check_code_is_not_stored(validation_code: &ValidationCode) {
assert!(!<Paras as Store>::CodeByHashRefs::contains_key(validation_code.hash()));
assert!(!<Paras as Store>::CodeByHash::contains_key(validation_code.hash()));
}
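// A minimal sketch of the reference-counting round-trip used by the helpers
// above. It assumes, as elsewhere in this module, that the private
// `increase_code_ref`/`decrease_code_ref` helpers are reachable from this
// child module and that `dummy_validation_code` yields non-empty code.
#[test]
fn code_ref_counting_round_trip() {
new_test_ext(MockGenesisConfig::default()).execute_with(|| {
let code = dummy_validation_code();
// First reference stores the code under its hash; the count becomes non-zero.
Paras::increase_code_ref(&code.hash(), &code);
check_code_is_stored(&code);
// Dropping the last reference removes the code from storage again.
Paras::decrease_code_ref(&code.hash());
check_code_is_not_stored(&code);
});
}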
#[test]
fn para_past_code_pruning_works_correctly() {
let mut past_code = ParaPastCodeMeta::default();
past_code.note_replacement(10u32, 10);
past_code.note_replacement(20, 25);
past_code.note_replacement(30, 35);
let old = past_code.clone();
assert!(past_code.prune_up_to(9).collect::<Vec<_>>().is_empty());
assert_eq!(old, past_code);
assert_eq!(past_code.prune_up_to(10).collect::<Vec<_>>(), vec![10]);
assert_eq!(
past_code,
ParaPastCodeMeta {
upgrade_times: vec![upgrade_at(20, 25), upgrade_at(30, 35)],
last_pruned: Some(10),
}
);
assert!(past_code.prune_up_to(21).collect::<Vec<_>>().is_empty());
assert_eq!(past_code.prune_up_to(26).collect::<Vec<_>>(), vec![20]);
assert_eq!(
past_code,
ParaPastCodeMeta { upgrade_times: vec![upgrade_at(30, 35)], last_pruned: Some(25) }
);
past_code.note_replacement(40, 42);
past_code.note_replacement(50, 53);
past_code.note_replacement(60, 66);
assert_eq!(
past_code,
ParaPastCodeMeta {
upgrade_times: vec![
upgrade_at(30, 35),
upgrade_at(40, 42),
upgrade_at(50, 53),
upgrade_at(60, 66)
],
last_pruned: Some(25),
}
);
assert_eq!(past_code.prune_up_to(60).collect::<Vec<_>>(), vec![30, 40, 50]);
assert_eq!(
past_code,
ParaPastCodeMeta { upgrade_times: vec![upgrade_at(60, 66)], last_pruned: Some(53) }
);
assert_eq!(past_code.most_recent_change(), Some(60));
assert_eq!(past_code.prune_up_to(66).collect::<Vec<_>>(), vec![60]);
assert_eq!(
past_code,
ParaPastCodeMeta { upgrade_times: Vec::new(), last_pruned: Some(66) }
);