diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs
index 3fff6312f333f0c66efce79554f7a7dd3a58377c..ab0ab685e69119fc572c66ea3a7c2a395149bb4b 100644
--- a/substrate/frame/staking/src/lib.rs
+++ b/substrate/frame/staking/src/lib.rs
@@ -882,7 +882,7 @@ enum Releases {
 
 impl Default for Releases {
 	fn default() -> Self {
-		Releases::V8_0_0
+		Releases::V10_0_0
 	}
 }
 
diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs
index 101cac0a31348e0ae42925c0ec49eefcc62ece81..7e3bf6ccb93e1680b876b723102e84ba24ec035d 100644
--- a/substrate/frame/staking/src/migrations.rs
+++ b/substrate/frame/staking/src/migrations.rs
@@ -27,13 +27,29 @@ pub mod v10 {
 	#[storage_alias]
 	type EarliestUnappliedSlash<T: Config> = StorageValue<Pallet<T>, EraIndex>;
 
+	/// Apply any pending slashes that were queued.
+	///
+	/// That means we might slash someone a bit too early, but we definitely
+	/// won't forget to slash them. The cap of 512 is somewhat arbitrary; it
+	/// prevents us from iterating over an unbounded number of keys in `on_runtime_upgrade`.
 	pub struct MigrateToV10<T>(sp_std::marker::PhantomData<T>);
 	impl<T: Config> OnRuntimeUpgrade for MigrateToV10<T> {
 		fn on_runtime_upgrade() -> frame_support::weights::Weight {
 			if StorageVersion::<T>::get() == Releases::V9_0_0 {
+				let pending_slashes = <Pallet<T> as Store>::UnappliedSlashes::iter().take(512);
+				for (era, slashes) in pending_slashes {
+					for slash in slashes {
+						// In the old slashing scheme, the slash era was the key at which we
+						// read from `UnappliedSlashes`.
+						log!(warn, "prematurely applying a slash ({:?}) for era {:?}", slash, era);
+						slashing::apply_slash::<T>(slash, era);
+					}
+				}
+
 				EarliestUnappliedSlash::<T>::kill();
 				StorageVersion::<T>::put(Releases::V10_0_0);
 
+				log!(info, "MigrateToV10 executed successfully");
 				T::DbWeight::get().reads_writes(1, 1)
 			} else {
 				log!(warn, "MigrateToV10 should be removed.");
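
For context (not part of this diff): for the migration to run at all, the runtime has to list `MigrateToV10` in the `OnRuntimeUpgrade` tuple handed to `frame_executive::Executive`. A minimal sketch of that wiring, assuming the usual `Runtime`, `Block`, and `AllPalletsWithSystem` items of a Substrate runtime crate:

```rust
// Hypothetical runtime wiring; `Runtime`, `Block`, and `AllPalletsWithSystem`
// are assumed to be defined elsewhere in the runtime crate.
pub type Migrations = (pallet_staking::migrations::v10::MigrateToV10<Runtime>,);

pub type Executive = frame_executive::Executive<
	Runtime,
	Block,
	frame_system::ChainContext<Runtime>,
	Runtime,
	AllPalletsWithSystem,
	Migrations, // run once, on the first block after the runtime upgrade
>;
```

Because `on_runtime_upgrade` is gated on `Releases::V9_0_0`, re-running it on a chain that is already at `V10_0_0` only hits the warning branch, so leaving the migration in the tuple for one extra release is harmless.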
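
A sketch of how the new branch could be exercised in the pallet's tests; `ExtBuilder`, `Test`, and the setup shown assume the staking mock in `mock.rs` and are illustrative, not part of this PR:

```rust
#[test]
fn migrate_to_v10_applies_pending_slashes() {
	use frame_support::traits::OnRuntimeUpgrade;

	ExtBuilder::default().build_and_execute(|| {
		// Pretend we are on the old version with slashes still queued
		// (populating `UnappliedSlashes` would use the mock's slashing helpers).
		StorageVersion::<Test>::put(Releases::V9_0_0);

		crate::migrations::v10::MigrateToV10::<Test>::on_runtime_upgrade();

		// The queue was drained, the alias killed, and the version bumped.
		assert_eq!(<Pallet<Test> as Store>::UnappliedSlashes::iter().count(), 0);
		assert_eq!(StorageVersion::<Test>::get(), Releases::V10_0_0);
	});
}
```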