"node/[email protected]:radupopa2010/substrate.git" did not exist on "d5dc301e613859f6b156fa23641a5d6c8285780b"
Newer
Older
/// Called by the initializer to initialize the paras pallet.
pub(crate) fn initializer_initialize(now: T::BlockNumber) -> Weight {
let weight = Self::prune_old_code(now);
weight + Self::process_scheduled_upgrade_changes(now)
}
/// Called by the initializer to finalize the paras pallet.
pub(crate) fn initializer_finalize(now: T::BlockNumber) {
Self::process_scheduled_upgrade_cooldowns(now);
}
/// Called by the initializer to note that a new session has started.
/// Returns the list of outgoing paras from the actions queue.
pub(crate) fn initializer_on_new_session(
notification: &SessionChangeNotification<T::BlockNumber>,
) -> Vec<ParaId> {
let outgoing_paras = Self::apply_actions_queue(notification.session_index);
Self::groom_ongoing_pvf_votes(&notification.new_config, notification.validators.len());
outgoing_paras
}
/// The validation code of a live para.
pub(crate) fn current_code(para_id: &ParaId) -> Option<ValidationCode> {
Self::current_code_hash(para_id).and_then(|code_hash| {
let code = CodeByHash::<T>::get(&code_hash);
if code.is_none() {
log::error!(
"Pallet paras storage is inconsistent, code not found for hash {}",
code_hash,
);
debug_assert!(false, "inconsistent paras storages");
}
code
})
}
// Apply all para actions queued for the given session index.
//
// The actions to take are based on the lifecycle of the paras.
//
// The final state of any para after the actions queue should be a
// parachain, parathread, or not registered. (stable states)
//
// Returns the list of outgoing paras from the actions queue.
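//
// For example, a para in the `Onboarding` state leaves this queue as a parachain or a
// parathread (depending on its genesis arguments), while an `OffboardingParachain` or
// `OffboardingParathread` para ends up not registered and is reported in the returned list.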
fn apply_actions_queue(session: SessionIndex) -> Vec<ParaId> {
let actions = ActionsQueue::<T>::take(session);
let mut parachains = <Self as Store>::Parachains::get();
let now = <frame_system::Pallet<T>>::block_number();
let mut outgoing = Vec::new();
for para in actions {
let lifecycle = ParaLifecycles::<T>::get(&para);
match lifecycle {
None | Some(ParaLifecycle::Parathread) | Some(ParaLifecycle::Parachain) => { /* Nothing to do... */
},
Some(ParaLifecycle::Onboarding) => {
if let Some(genesis_data) = <Self as Store>::UpcomingParasGenesis::take(&para) {
if genesis_data.parachain {
if let Err(i) = parachains.binary_search(&para) {
parachains.insert(i, para);
}
ParaLifecycles::<T>::insert(&para, ParaLifecycle::Parachain);
} else {
ParaLifecycles::<T>::insert(&para, ParaLifecycle::Parathread);
}
// HACK: see the notice in `schedule_para_initialize`.
//
// Apparently, this is left over from a prior version of the runtime.
// To handle this we just insert the code and link the current code hash
// to it.
if !genesis_data.validation_code.0.is_empty() {
let code_hash = genesis_data.validation_code.hash();
Self::increase_code_ref(&code_hash, &genesis_data.validation_code);
<Self as Store>::CurrentCodeHash::insert(&para, code_hash);
}
<Self as Store>::Heads::insert(&para, genesis_data.genesis_head);
}
},
// Upgrade a parathread to a parachain
Some(ParaLifecycle::UpgradingParathread) => {
if let Err(i) = parachains.binary_search(&para) {
parachains.insert(i, para);
}
ParaLifecycles::<T>::insert(¶, ParaLifecycle::Parachain);
},
// Downgrade a parachain to a parathread
Some(ParaLifecycle::DowngradingParachain) => {
if let Ok(i) = parachains.binary_search(&para) {
parachains.remove(i);
}
ParaLifecycles::<T>::insert(¶, ParaLifecycle::Parathread);
},
// Offboard a parathread or parachain from the system
Some(ParaLifecycle::OffboardingParachain) |
Some(ParaLifecycle::OffboardingParathread) => {
if let Ok(i) = parachains.binary_search(&para) {
parachains.remove(i);
}
<Self as Store>::Heads::remove(&para);
<Self as Store>::FutureCodeUpgrades::remove(&para);
<Self as Store>::UpgradeGoAheadSignal::remove(&para);
<Self as Store>::UpgradeRestrictionSignal::remove(&para);
let removed_future_code_hash = <Self as Store>::FutureCodeHash::take(&para);
if let Some(removed_future_code_hash) = removed_future_code_hash {
Self::decrease_code_ref(&removed_future_code_hash);
}
let removed_code_hash = <Self as Store>::CurrentCodeHash::take(&para);
if let Some(removed_code_hash) = removed_code_hash {
Self::note_past_code(para, now, now, removed_code_hash);
}
outgoing.push(para);
},
}
}
if !outgoing.is_empty() {
// Filter offboarded parachains from the upcoming upgrades and upgrade cooldowns list.
//
// We do it after the offboarding to get away with only a single read/write per list.
//
// NOTE: both of these iterate over the list and over the outgoing set. We do not expect
// either of them to be large. Thus should be fine.
<Self as Store>::UpcomingUpgrades::mutate(|upcoming_upgrades| {
*upcoming_upgrades = mem::take(upcoming_upgrades)
.into_iter()
.filter(|&(ref para, _)| !outgoing.contains(para))
.collect();
});
<Self as Store>::UpgradeCooldowns::mutate(|upgrade_cooldowns| {
*upgrade_cooldowns = mem::take(upgrade_cooldowns)
.into_iter()
.filter(|&(ref para, _)| !outgoing.contains(para))
.collect();
});
}
// Place the new parachains set in storage.
<Self as Store>::Parachains::set(parachains);
return outgoing
}
// note replacement of the code of para with given `id`, which occurred in the
// context of the given relay-chain block number. provide the replaced code.
//
// `at` for para-triggered replacement is the block number of the relay-chain
// block in whose context the parablock was executed
// (i.e. number of `relay_parent` in the receipt)
fn note_past_code(
id: ParaId,
at: T::BlockNumber,
now: T::BlockNumber,
old_code_hash: ValidationCodeHash,
) -> Weight {
<Self as Store>::PastCodeMeta::mutate(&id, |past_meta| {
past_meta.note_replacement(at, now);
});
<Self as Store>::PastCodeHash::insert(&(id, at), old_code_hash);
// Schedule pruning for this past-code to be removed as soon as it
// exits the slashing window.
<Self as Store>::PastCodePruning::mutate(|pruning| {
let insert_idx =
pruning.binary_search_by_key(&now, |&(_, b)| b).unwrap_or_else(|idx| idx);
pruning.insert(insert_idx, (id, now));
});
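// NOTE: `PastCodePruning` is kept sorted by the pruning block number, so `prune_old_code`
// below can drain a prefix of the list with a simple `take_while`.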
T::DbWeight::get().reads_writes(2, 3)
}
// Looks at old code metadata, compares it to the current acceptance window, and prunes
// the code that is too old.
fn prune_old_code(now: T::BlockNumber) -> Weight {
let config = configuration::Pallet::<T>::config();
let code_retention_period = config.code_retention_period;
if now <= code_retention_period {
let weight = T::DbWeight::get().reads_writes(1, 0);
return weight
}
// The height of any changes we should no longer keep around.
let pruning_height = now - (code_retention_period + One::one());
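// Worked example: with `code_retention_period = 100` and `now = 201`, `pruning_height`
// is 100, so any code replacement recorded at block 100 or earlier is pruned below.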
let pruning_tasks_done = <Self as Store>::PastCodePruning::mutate(
|pruning_tasks: &mut Vec<(_, T::BlockNumber)>| {
let (pruning_tasks_done, pruning_tasks_to_do) = {
// find all past code that has just exited the pruning window.
let up_to_idx =
pruning_tasks.iter().take_while(|&(_, at)| at <= &pruning_height).count();
(up_to_idx, pruning_tasks.drain(..up_to_idx))
};
for (para_id, _) in pruning_tasks_to_do {
let full_deactivate = <Self as Store>::PastCodeMeta::mutate(&para_id, |meta| {
for pruned_repl_at in meta.prune_up_to(pruning_height) {
let removed_code_hash =
<Self as Store>::PastCodeHash::take(&(para_id, pruned_repl_at));
if let Some(removed_code_hash) = removed_code_hash {
Self::decrease_code_ref(&removed_code_hash);
} else {
log::warn!(
"Missing code for removed hash {:?}",
removed_code_hash,
);
}
}
meta.is_empty() && Self::para_head(&para_id).is_none()
});
// This parachain has been removed and now the vestigial code
// has been removed from the state. Clean up the meta as well.
if full_deactivate {
<Self as Store>::PastCodeMeta::remove(&para_id);
}
}
pruning_tasks_done as u64
},
);
// 1 read for the meta for each pruning task, 1 read for the config
// 2 writes: updating the meta and pruning the code
T::DbWeight::get().reads_writes(1 + pruning_tasks_done, 2 * pruning_tasks_done)
}
/// Process the timers related to upgrades. Specifically, the upgrade go-ahead signals toggle
/// and the upgrade cooldown restrictions. Note that this function does not actually unset
/// the upgrade restriction; that happens in the `initializer_finalize` function. It does,
/// however, count the number of expired cooldown timers so that we can reserve weight
/// for the `initializer_finalize` function.
fn process_scheduled_upgrade_changes(now: T::BlockNumber) -> Weight {
// account weight for `UpcomingUpgrades::mutate`.
let mut weight = T::DbWeight::get().reads_writes(1, 1);
let upgrades_signaled = <Self as Store>::UpcomingUpgrades::mutate(
|upcoming_upgrades: &mut Vec<(ParaId, T::BlockNumber)>| {
let num = upcoming_upgrades.iter().take_while(|&(_, at)| at <= &now).count();
for (para, _) in upcoming_upgrades.drain(..num) {
<Self as Store>::UpgradeGoAheadSignal::insert(&para, UpgradeGoAhead::GoAhead);
}
num
},
);
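// Illustration: with `UpcomingUpgrades = [(A, 5), (B, 8), (C, 10)]` and `now = 8`, the
// entries for paras A and B are drained above and both receive the `GoAhead` signal.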
weight += T::DbWeight::get().writes(upgrades_signaled as u64);
// account weight for `UpgradeCooldowns::get`.
weight += T::DbWeight::get().reads(1);
let cooldowns_expired = <Self as Store>::UpgradeCooldowns::get()
.iter()
.take_while(|&(_, at)| at <= &now)
.count();
// reserve weight for `initializer_finalize`:
// - 1 read and 1 write for `UpgradeCooldowns::mutate`.
// - 1 write per expired cooldown.
weight += T::DbWeight::get().reads_writes(1, 1);
weight += T::DbWeight::get().writes(cooldowns_expired as u64);
weight
}
/// Actually perform unsetting the expired upgrade restrictions.
///
/// See `process_scheduled_upgrade_changes` for more details.
fn process_scheduled_upgrade_cooldowns(now: T::BlockNumber) {
<Self as Store>::UpgradeCooldowns::mutate(
|upgrade_cooldowns: &mut Vec<(ParaId, T::BlockNumber)>| {
for &(para, _) in upgrade_cooldowns.iter().take_while(|&(_, at)| at <= &now) {
<Self as Store>::UpgradeRestrictionSignal::remove(&para);
}
},
);
}
/// Goes over all PVF votes in progress, reinitializes ballots, increments ages and prunes the
/// active votes that reached their time-to-live.
fn groom_ongoing_pvf_votes(
cfg: &configuration::HostConfiguration<T::BlockNumber>,
new_n_validators: usize,
) -> Weight {
let mut weight = T::DbWeight::get().reads(1);
let potentially_active_votes = PvfActiveVoteList::<T>::get();
// Initially empty list which contains all the PVF active votes that made it through this
// session change.
//
// **Ordered** the same way as `PvfActiveVoteList`.
let mut actually_active_votes = Vec::with_capacity(potentially_active_votes.len());
for vote_subject in potentially_active_votes {
let mut vote_state = match PvfActiveVoteMap::<T>::take(&vote_subject) {
Some(v) => v,
None => {
// This branch should never be reached. This is due to the fact that the set of
// `PvfActiveVoteMap`'s keys is always equal to the set of items found in
// `PvfActiveVoteList`.
log::warn!(
"The PvfActiveVoteMap is out of sync with PvfActiveVoteList!",
);
debug_assert!(false);
continue
},
};
vote_state.age += 1;
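// Illustrative example: with `pvf_voting_ttl = 2`, a vote started in session N survives
// the change to session N+1 (age 1 < 2) but is rejected at the change to session N+2.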
if vote_state.age < cfg.pvf_voting_ttl {
weight += T::DbWeight::get().writes(1);
vote_state.reinitialize_ballots(new_n_validators);
PvfActiveVoteMap::<T>::insert(&vote_subject, vote_state);
// push maintaining the original order.
actually_active_votes.push(vote_subject);
} else {
// TTL is reached. Reject.
weight += Self::enact_pvf_rejected(&vote_subject, vote_state.causes);
}
}
weight += T::DbWeight::get().writes(1);
PvfActiveVoteList::<T>::put(actually_active_votes);
weight
}
fn enact_pvf_accepted(
now: T::BlockNumber,
code_hash: &ValidationCodeHash,
causes: &[PvfCheckCause<T::BlockNumber>],
sessions_observed: SessionIndex,
cfg: &configuration::HostConfiguration<T::BlockNumber>,
) -> Weight {
let mut weight = 0;
for cause in causes {
match cause {
PvfCheckCause::Onboarding(id) => {
weight += Self::proceed_with_onboarding(*id, sessions_observed);
},
PvfCheckCause::Upgrade { id, relay_parent_number } => {
weight +=
Self::proceed_with_upgrade(*id, code_hash, now, *relay_parent_number, cfg);
},
}
}
weight
}
fn proceed_with_onboarding(id: ParaId, sessions_observed: SessionIndex) -> Weight {
let weight = T::DbWeight::get().reads_writes(2, 1);
// we should onboard only after `SESSION_DELAY` sessions but we should take
// into account the number of sessions the PVF pre-checking occupied.
//
// we cannot onboard at the current session, so it must be at least one
// session ahead.
let onboard_at: SessionIndex = shared::Pallet::<T>::session_index() +
cmp::max(shared::SESSION_DELAY.saturating_sub(sessions_observed), 1);
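// For example, assuming `SESSION_DELAY = 2`: if pre-checking concluded within the current
// session (`sessions_observed = 0`), onboarding is queued 2 sessions ahead; if it took 3
// sessions, the para is onboarded at the very next session (the `max(.., 1)` floor).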
ActionsQueue::<T>::mutate(onboard_at, |v| {
if let Err(i) = v.binary_search(&id) {
v.insert(i, id);
}
});
weight
}
fn proceed_with_upgrade(
id: ParaId,
code_hash: &ValidationCodeHash,
now: T::BlockNumber,
relay_parent_number: T::BlockNumber,
cfg: &configuration::HostConfiguration<T::BlockNumber>,
) -> Weight {
let mut weight = 0;
// Compute the relay-chain block number starting at which the code upgrade is ready to be
// applied.
//
// The first parablock that has a relay-parent at or above the height of `expected_at`
// will trigger the code upgrade. The parablock that comes after that will be validated
// against the new validation code.
//
// Here we are trying to choose the block number that is `validation_upgrade_delay` blocks
// after the relay-parent of the block that scheduled the code upgrade, but no less than
// `minimum_validation_upgrade_delay` blocks from now. We want this delay out of caution, so
// that when the last vote for pre-checking comes in, the parachain still has some time until
// the upgrade finally takes place.
let expected_at = cmp::max(
relay_parent_number + cfg.validation_upgrade_delay,
now + cfg.minimum_validation_upgrade_delay,
);
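// Worked example: with `relay_parent_number = 100`, `validation_upgrade_delay = 5`,
// `now = 103` and `minimum_validation_upgrade_delay = 20`, we get
// `expected_at = max(105, 123) = 123`.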
weight += T::DbWeight::get().reads_writes(1, 4);
FutureCodeUpgrades::<T>::insert(&id, expected_at);
<Self as Store>::UpcomingUpgrades::mutate(|upcoming_upgrades| {
let insert_idx = upcoming_upgrades
.binary_search_by_key(&expected_at, |&(_, b)| b)
.unwrap_or_else(|idx| idx);
upcoming_upgrades.insert(insert_idx, (id, expected_at));
});
let expected_at = expected_at.saturated_into();
let log = ConsensusLog::ParaScheduleUpgradeCode(id, *code_hash, expected_at);
<frame_system::Pallet<T>>::deposit_log(log.into());
weight
}
fn enact_pvf_rejected(
code_hash: &ValidationCodeHash,
causes: Vec<PvfCheckCause<T::BlockNumber>>,
) -> Weight {
let mut weight = T::DbWeight::get().writes(1);
for cause in causes {
// Whenever PVF pre-checking is started or a new cause is added to it, the RC is bumped.
// Now we need to unbump it.
weight += Self::decrease_code_ref(code_hash);
match cause {
PvfCheckCause::Onboarding(id) => {
// Here we need to undo everything that was done during `schedule_para_initialize`.
// Essentially, the logic is similar to offboarding, with the exception that before
// actual onboarding the parachain did not have a chance to schedule any upgrades.
// Therefore we can skip all the upgrade-related storage items here.
weight += T::DbWeight::get().writes(3);
UpcomingParasGenesis::<T>::remove(&id);
CurrentCodeHash::<T>::remove(&id);
ParaLifecycles::<T>::remove(&id);
},
PvfCheckCause::Upgrade { id, .. } => {
weight += T::DbWeight::get().writes(2);
UpgradeGoAheadSignal::<T>::insert(&id, UpgradeGoAhead::Abort);
FutureCodeHash::<T>::remove(&id);
},
}
}
weight
}
/// Verify that `schedule_para_initialize` can be called successfully.
///
/// Returns false if para is already registered in the system.
pub fn can_schedule_para_initialize(id: &ParaId) -> bool {
ParaLifecycles::<T>::get(id).is_none()
}
/// Schedule a para to be initialized. If the validation code is not already stored in the
/// code storage, then a PVF pre-checking process will be initiated.
/// Only after the PVF pre-checking succeeds can the para be onboarded. Note that calling this
/// does not guarantee that the parachain will eventually be onboarded: onboarding will not
/// happen if the PVF does not pass pre-checking.
///
/// The Para ID should not be activated in this pallet. The validation code supplied in
/// `genesis_data` should not be empty. If those conditions are not met, then the para cannot
/// be onboarded.
pub(crate) fn schedule_para_initialize(
id: ParaId,
mut genesis_data: ParaGenesisArgs,
) -> DispatchResult {
// Make sure parachain isn't already in our system and that the onboarding parameters are
// valid.
ensure!(Self::can_schedule_para_initialize(&id), Error::<T>::CannotOnboard);
ensure!(!genesis_data.validation_code.0.is_empty(), Error::<T>::CannotOnboard);
ParaLifecycles::<T>::insert(&id, ParaLifecycle::Onboarding);
// HACK: here we are doing something nasty.
//
// In order to fix the [soaking issue] we insert the code eagerly here. When the onboarding
// is finally enacted, we do not need to insert the code anymore. Therefore, there is no
// reason for the validation code to be copied into the `ParaGenesisArgs`. We also do not
// want to copy the validation code needlessly, to avoid adding more memory pressure.
//
// That said, we also want to preserve `ParaGenesisArgs` as it is, for now. There are two
// reasons:
//
// - Doing it within the context of the PR that introduces this change is undesirable, since
// it is already a big change, and that change would require a migration. Moreover, if we
// run the new version of the runtime, there will be less things to worry about during
// the eventual proper migration.
//
// - This data type already is used for generating genesis, and changing it will probably
// introduce some unnecessary burden.
//
// So instead of going through it right now, we will do something sneaky. Specifically:
//
// - Insert the `CurrentCodeHash` now, instead of during the onboarding. That allows us to
// get rid of hashing the validation code when onboarding.
//
// - Replace `validation_code` with a sentinel value: an empty vector. This should be fine
// as long as we do not allow registering parachains with empty code. At the moment of
// writing this should already be the case.
//
// - An empty value is treated as a sign that the current code was already inserted during the onboarding.
//
// This is only an intermediate solution and should be fixed in the foreseeable future.
//
// [soaking issue]: https://github.com/paritytech/polkadot/issues/3918
let validation_code =
mem::replace(&mut genesis_data.validation_code, ValidationCode(Vec::new()));
UpcomingParasGenesis::<T>::insert(&id, genesis_data);
let validation_code_hash = validation_code.hash();
<Self as Store>::CurrentCodeHash::insert(&id, validation_code_hash);
let cfg = configuration::Pallet::<T>::config();
Self::kick_off_pvf_check(
PvfCheckCause::Onboarding(id),
validation_code_hash,
validation_code,
&cfg,
);
Ok(())
}
/// Schedule a para to be cleaned up at the start of the next session.
/// Will return an error if either of the following is true:
///
/// - para is not a stable parachain or parathread (i.e. [`ParaLifecycle::is_stable`] is `false`)
/// - para has a pending upgrade.
///
/// No-op if para is not registered at all.
pub(crate) fn schedule_para_cleanup(id: ParaId) -> DispatchResult {
// Disallow offboarding in case there is an upcoming upgrade.
//
// This is not a fundamental limitation but rather a simplification: it allows us to get
// away without introducing additional logic for pruning and, more importantly, enacting
// ongoing PVF pre-checking votes. It also removes some nasty edge cases.
//
// This implicitly assumes that the given para exists, i.e. its lifecycle != None.
if FutureCodeHash::<T>::contains_key(&id) {
return Err(Error::<T>::CannotOffboard.into())
}
let lifecycle = ParaLifecycles::<T>::get(&id);
match lifecycle {
// If para is not registered, nothing to do!
None => return Ok(()),
Some(ParaLifecycle::Parathread) => {
ParaLifecycles::<T>::insert(&id, ParaLifecycle::OffboardingParathread);
},
Some(ParaLifecycle::Parachain) => {
ParaLifecycles::<T>::insert(&id, ParaLifecycle::OffboardingParachain);
},
_ => return Err(Error::<T>::CannotOffboard)?,
}
let scheduled_session = Self::scheduled_session();
ActionsQueue::<T>::mutate(scheduled_session, |v| {
if let Err(i) = v.binary_search(&id) {
v.insert(i, id);
}
});
Ok(())
}
/// Schedule a parathread to be upgraded to a parachain.
///
/// Will return error if `ParaLifecycle` is not `Parathread`.
pub(crate) fn schedule_parathread_upgrade(id: ParaId) -> DispatchResult {
let scheduled_session = Self::scheduled_session();
let lifecycle = ParaLifecycles::<T>::get(&id).ok_or(Error::<T>::NotRegistered)?;
ensure!(lifecycle == ParaLifecycle::Parathread, Error::<T>::CannotUpgrade);
ParaLifecycles::<T>::insert(&id, ParaLifecycle::UpgradingParathread);
ActionsQueue::<T>::mutate(scheduled_session, |v| {
if let Err(i) = v.binary_search(&id) {
v.insert(i, id);
}
});
Ok(())
}
/// Schedule a parachain to be downgraded to a parathread.
///
/// Will return an error if `ParaLifecycle` is not `Parachain`.
pub(crate) fn schedule_parachain_downgrade(id: ParaId) -> DispatchResult {
let scheduled_session = Self::scheduled_session();
let lifecycle = ParaLifecycles::<T>::get(&id).ok_or(Error::<T>::NotRegistered)?;
ensure!(lifecycle == ParaLifecycle::Parachain, Error::<T>::CannotDowngrade);
ParaLifecycles::<T>::insert(&id, ParaLifecycle::DowngradingParachain);
ActionsQueue::<T>::mutate(scheduled_session, |v| {
if let Err(i) = v.binary_search(&id) {
v.insert(i, id);
}
});
Ok(())
}
/// Schedule a future code upgrade of the given parachain.
///
/// If the new code is not known, then the PVF pre-checking will be started for that validation
/// code. In case the validation code does not pass the PVF pre-checking process, the
/// upgrade will be aborted.
///
/// Only after the code is approved by the process can the upgrade be scheduled. Specifically,
/// the relay-chain block number will be determined at which the upgrade will take place. We
/// call that block `expected_at`.
///
/// Once the candidate with the relay-parent >= `expected_at` is enacted, the new validation code
/// will be applied. Therefore, the new code will be used to validate the next candidate.
/// The new code should not be equal to the current one, otherwise the upgrade will be aborted.
/// If there is already a scheduled code upgrade for the para, this is a no-op.
pub(crate) fn schedule_code_upgrade(
id: ParaId,
new_code: ValidationCode,
relay_parent_number: T::BlockNumber,
cfg: &configuration::HostConfiguration<T::BlockNumber>,
) -> Weight {
let mut weight = T::DbWeight::get().reads(1);
// Enacting this should be prevented by the `can_schedule_upgrade`
if FutureCodeHash::<T>::contains_key(&id) {
// This branch should never be reached. Signalling an upgrade is disallowed for a para
// that already has one upgrade scheduled.
//
// Any candidate that attempts to do that should be rejected by
// `can_upgrade_validation_code`.
//
// NOTE: we cannot set `UpgradeGoAheadSignal` signal here since this will be reset by
// the following call `note_new_head`
log::warn!(target: LOG_TARGET, "ended up scheduling an upgrade while one is pending",);
return weight
}
let code_hash = new_code.hash();
// para signals an update to the same code? This does not make a lot of sense, so abort the
// process right away.
//
// We do not want to allow this since it will mess with the code reference counting.
weight += T::DbWeight::get().reads(1);
if CurrentCodeHash::<T>::get(&id) == Some(code_hash) {
// NOTE: we cannot set `UpgradeGoAheadSignal` signal here since this will be reset by
// the following call `note_new_head`
log::warn!(
"para tried to upgrade to the same code. Abort the upgrade",
);
return weight
}
// This is the start of the upgrade process. Prevent any further attempts at upgrading.
weight += T::DbWeight::get().writes(2);
FutureCodeHash::<T>::insert(&id, &code_hash);
UpgradeRestrictionSignal::<T>::insert(&id, UpgradeRestriction::Present);
weight += T::DbWeight::get().reads_writes(1, 1);
let next_possible_upgrade_at = relay_parent_number + cfg.validation_upgrade_cooldown;
<Self as Store>::UpgradeCooldowns::mutate(|upgrade_cooldowns| {
let insert_idx = upgrade_cooldowns
.binary_search_by_key(&next_possible_upgrade_at, |&(_, b)| b)
.unwrap_or_else(|idx| idx);
upgrade_cooldowns.insert(insert_idx, (id, next_possible_upgrade_at));
});
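// Note: `UpgradeCooldowns` is kept sorted by the cooldown expiry block, so that
// `process_scheduled_upgrade_changes` and `process_scheduled_upgrade_cooldowns` can
// consume expired entries with a prefix `take_while`.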
weight += Self::kick_off_pvf_check(
PvfCheckCause::Upgrade { id, relay_parent_number },
code_hash,
new_code,
cfg,
);
weight
}
/// Makes sure that the given code hash has passed pre-checking.
///
/// If the given code hash has already passed pre-checking, then the approval happens
/// immediately. Similarly, if the pre-checking is turned off, the update is scheduled immediately
/// as well. In this case, the behavior is similar to the previous, i.e. the upgrade sequence
/// is purely time-based.
///
/// If the code is unknown, but the pre-checking for that PVF is already running then we perform
/// "coalescing". We save the cause for this PVF pre-check request and just add it to the
/// existing active PVF vote.
///
/// And finally, if the code is unknown and pre-checking is not running, we start the
/// pre-checking process anew.
///
/// Unconditionally increases the reference count for the passed `code`.
fn kick_off_pvf_check(
cause: PvfCheckCause<T::BlockNumber>,
code_hash: ValidationCodeHash,
code: ValidationCode,
cfg: &configuration::HostConfiguration<T::BlockNumber>,
) -> Weight {
let mut weight = 0;
weight += T::DbWeight::get().reads(1);
match PvfActiveVoteMap::<T>::get(&code_hash) {
None => {
// We deliberately are using `CodeByHash` here instead of the `CodeByHashRefs`. This
// is because the code may have been added by `add_trusted_validation_code`.
let known_code = CodeByHash::<T>::contains_key(&code_hash);
weight += T::DbWeight::get().reads(1);
if !cfg.pvf_checking_enabled || known_code {
// Either:
// - the code is known and there is no active PVF vote for it, meaning it is
// already checked, or
// - the PVF checking is disabled.
// In any case: fast-track the PVF checking into the accepted state.
weight += T::DbWeight::get().reads(1);
let now = <frame_system::Pallet<T>>::block_number();
weight += Self::enact_pvf_accepted(now, &code_hash, &[cause], 0, cfg);
} else {
// PVF is not being pre-checked and it is not known. Start a new pre-checking
// process.
weight += T::DbWeight::get().reads_writes(3, 2);
let now = <frame_system::Pallet<T>>::block_number();
let n_validators = shared::Pallet::<T>::active_validator_keys().len();
PvfActiveVoteMap::<T>::insert(
&code_hash,
PvfCheckActiveVoteState::new(now, n_validators, cause),
);
PvfActiveVoteList::<T>::mutate(|l| {
if let Err(idx) = l.binary_search(&code_hash) {
l.insert(idx, code_hash);
}
});
}
},
Some(mut vote_state) => {
// Coalescing: the PVF is already being pre-checked so we just need to piggy back
// on it.
weight += T::DbWeight::get().writes(1);
vote_state.causes.push(cause);
PvfActiveVoteMap::<T>::insert(&code_hash, vote_state);
},
}
// We increase the code RC here in any case. Intuitively the parachain that requested this
// action is now a user of that PVF.
//
// If the result of the pre-checking is reject, then we would decrease the RC for each cause,
// including the current one.
//
// If the result of the pre-checking is accept, then we do nothing to the RC because the PVF
// will continue to be used by the same users.
//
// If the PVF was fast-tracked (i.e. there is already a non-zero RC) and there is no
// pre-checking, we do not change the RC either.
weight += Self::increase_code_ref(&code_hash, &code);
weight
}
/// Note that a para has progressed to a new head, where the new head was executed in the context
/// of a relay-chain block with given number. This will apply pending code upgrades based
/// on the relay-parent block number provided.
pub(crate) fn note_new_head(
id: ParaId,
new_head: HeadData,
execution_context: T::BlockNumber,
) -> Weight {
// Note the para's new head; accounted for below as the "heads update" write.
Heads::<T>::insert(&id, new_head);
if let Some(expected_at) = <Self as Store>::FutureCodeUpgrades::get(&id) {
if expected_at <= execution_context {
<Self as Store>::FutureCodeUpgrades::remove(&id);
<Self as Store>::UpgradeGoAheadSignal::remove(&id);
// Both should always be `Some` in this case, since a code upgrade is scheduled.
let new_code_hash = if let Some(new_code_hash) = FutureCodeHash::<T>::take(&id) {
new_code_hash
} else {
log::error!(target: LOG_TARGET, "Missing future code hash for {:?}", &id);
return T::DbWeight::get().reads_writes(3, 1 + 3)
};
let maybe_prior_code_hash = CurrentCodeHash::<T>::get(&id);
CurrentCodeHash::<T>::insert(&id, &new_code_hash);
let log = ConsensusLog::ParaUpgradeCode(id, new_code_hash);
<frame_system::Pallet<T>>::deposit_log(log.into());
// `now` is only used for registering pruning as part of `fn note_past_code`
let now = <frame_system::Pallet<T>>::block_number();
let weight = if let Some(prior_code_hash) = maybe_prior_code_hash {
Self::note_past_code(id, expected_at, now, prior_code_hash)
} else {
log::error!(target: LOG_TARGET, "Missing prior code hash for para {:?}", &id);
0 as Weight
};
// add 1 to writes due to heads update.
weight + T::DbWeight::get().reads_writes(3, 1 + 3)
} else {
T::DbWeight::get().reads_writes(1, 1 + 0)
}
} else {
// This means there is no upgrade scheduled.
//
// In case the upgrade was aborted by the relay-chain we should reset
// the `Abort` signal.
UpgradeGoAheadSignal::<T>::remove(&id);
T::DbWeight::get().reads_writes(1, 2)
}
}
/// Returns the list of PVFs (aka validation code) that require casting a vote by a validator in
/// the active validator set.
pub(crate) fn pvfs_require_precheck() -> Vec<ValidationCodeHash> {
PvfActiveVoteList::<T>::get()
}
/// Submits a given PVF check statement with corresponding signature as an unsigned transaction
/// into the memory pool. Ultimately, that disseminates the transaction across the network.
///
/// This function expects an offchain context; it cannot be called from on-chain logic.
///
/// The signature is assumed to pertain to `stmt`.
pub(crate) fn submit_pvf_check_statement(
stmt: PvfCheckStatement,
signature: ValidatorSignature,
) {
use frame_system::offchain::SubmitTransaction;
if let Err(e) = SubmitTransaction::<T, Call<T>>::submit_unsigned_transaction(
Call::include_pvf_check_statement { stmt, signature }.into(),
) {
log::error!(target: LOG_TARGET, "Error submitting pvf check statement: {:?}", e,);
/// Returns the current lifecycle state of the para.
pub fn lifecycle(id: ParaId) -> Option<ParaLifecycle> {
ParaLifecycles::<T>::get(&id)
}
/// Returns whether the given ID refers to a valid para.
///
/// Paras that are onboarding or offboarding are not included.
pub fn is_valid_para(id: ParaId) -> bool {
if let Some(state) = ParaLifecycles::<T>::get(&id) {
!state.is_onboarding() && !state.is_offboarding()
} else {
false
}
}
/// Whether a para ID corresponds to any live parachain.
///
/// Includes parachains which will downgrade to a parathread in the future.
pub fn is_parachain(id: ParaId) -> bool {
if let Some(state) = ParaLifecycles::<T>::get(&id) {
state.is_parachain()
} else {
false
}
}
/// Whether a para ID corresponds to any live parathread.
///
/// Includes parathreads which will upgrade to parachains in the future.
pub fn is_parathread(id: ParaId) -> bool {
if let Some(state) = ParaLifecycles::<T>::get(&id) {
state.is_parathread()
} else {
false
}
}
/// If a candidate from the specified parachain were submitted at the current block, this
/// function returns whether that candidate would pass the acceptance criteria.
pub(crate) fn can_upgrade_validation_code(id: ParaId) -> bool {
FutureCodeHash::<T>::get(&id).is_none() && UpgradeRestrictionSignal::<T>::get(&id).is_none()
}
/// Return the session index that should be used for any future scheduled changes.
fn scheduled_session() -> SessionIndex {
shared::Pallet::<T>::scheduled_session()
}
/// Store the validation code if it is not already stored, and increase its reference count.
///
/// Returns the weight consumed.
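///
/// For example, if two paras register the same validation code, the blob is stored once in
/// `CodeByHash` while `CodeByHashRefs` for its hash becomes 2.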
fn increase_code_ref(code_hash: &ValidationCodeHash, code: &ValidationCode) -> Weight {
let mut weight = T::DbWeight::get().reads_writes(1, 1);
<Self as Store>::CodeByHashRefs::mutate(code_hash, |refs| {
if *refs == 0 {
weight += T::DbWeight::get().writes(1);
<Self as Store>::CodeByHash::insert(code_hash, code);
}
*refs += 1;
});
weight
}
/// Decrease the reference count of the validation code and remove it from storage if it drops to zero.
///
/// Returns the weight consumed.
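///
/// Continuing the example above: when one of the two paras drops its use of the code, the
/// count goes to 1 and the blob stays; when the last user is removed, both the `CodeByHash`
/// and `CodeByHashRefs` entries are deleted.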
fn decrease_code_ref(code_hash: &ValidationCodeHash) -> Weight {
let mut weight = T::DbWeight::get().reads(1);
let refs = <Self as Store>::CodeByHashRefs::get(code_hash);
if refs == 0 {
log::error!(target: LOG_TARGET, "Code refs is already zero for {:?}", code_hash);
return weight
}
if refs <= 1 {
weight += T::DbWeight::get().writes(2);
<Self as Store>::CodeByHash::remove(code_hash);
<Self as Store>::CodeByHashRefs::remove(code_hash);
} else {
weight += T::DbWeight::get().writes(1);
<Self as Store>::CodeByHashRefs::insert(code_hash, refs - 1);
}
weight
}
/// Test function for triggering a new session in this pallet.
#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
pub fn test_on_new_session() {
Self::initializer_on_new_session(&SessionChangeNotification {
session_index: shared::Pallet::<T>::session_index(),
..Default::default()
});
}
#[cfg(any(feature = "runtime-benchmarks", test))]
pub fn heads_insert(para_id: &ParaId, head_data: HeadData) {
Heads::<T>::insert(para_id, head_data);
}
}
#[cfg(test)]
mod tests {
use super::*;
use frame_support::{assert_err, assert_ok, assert_storage_noop};
use keyring::Sr25519Keyring;
use primitives::{
v0::PARACHAIN_KEY_TYPE_ID,
v1::{BlockNumber, ValidatorId},
};
use sc_keystore::LocalKeystore;
use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
use std::sync::Arc;
use test_helpers::{dummy_head_data, dummy_validation_code};
use crate::{
configuration::HostConfiguration,
mock::{
new_test_ext, Configuration, MockGenesisConfig, Origin, Paras, ParasShared, System,
Test,
},
};
static VALIDATORS: &[Sr25519Keyring] = &[
Sr25519Keyring::Alice,
Sr25519Keyring::Bob,
Sr25519Keyring::Charlie,
Sr25519Keyring::Dave,
Sr25519Keyring::Ferdie,
];
fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec<ValidatorId> {
val_ids.iter().map(|v| v.public().into()).collect()
}
fn sign_and_include_pvf_check_statement(stmt: PvfCheckStatement) {
let validators = &[
Sr25519Keyring::Alice,
Sr25519Keyring::Bob,
Sr25519Keyring::Charlie,
Sr25519Keyring::Dave,
Sr25519Keyring::Ferdie,
];
let signature = validators[stmt.validator_index.0 as usize].sign(&stmt.signing_payload());
Paras::include_pvf_check_statement(None.into(), stmt, signature.into()).unwrap();
}
fn run_to_block(to: BlockNumber, new_session: Option<Vec<BlockNumber>>) {
let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
for validator in VALIDATORS.iter() {
SyncCryptoStore::sr25519_generate_new(