From 72841b731727e69db38f9bd616190aa8d50a56ba Mon Sep 17 00:00:00 2001 From: kianenigma <kian@parity.io> Date: Wed, 29 Jan 2025 15:53:14 +0000 Subject: [PATCH] refactor into MinerConfig --- substrate/bin/node/runtime/src/lib.rs | 27 +- .../src/helpers.rs | 38 +- .../election-provider-multi-block/src/lib.rs | 121 ++- .../src/mock/mod.rs | 44 +- .../src/mock/signed.rs | 7 +- .../src/signed/mod.rs | 24 +- .../src/types.rs | 93 +- .../src/unsigned/miner.rs | 850 +++++++++--------- .../src/unsigned/mod.rs | 51 +- .../src/verifier/impls.rs | 207 +++-- .../src/verifier/mod.rs | 24 +- .../election-provider-support/src/lib.rs | 83 +- .../election-provider-support/src/onchain.rs | 9 +- .../election-provider-support/src/tests.rs | 5 +- 14 files changed, 834 insertions(+), 749 deletions(-) diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index d844cffd045..e00680d1954 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -914,14 +914,31 @@ pub(crate) mod multi_block_impls { parameter_types! { pub Pages: u32 = 4; // nominators snapshot size - pub VoterSnapshotPerBlock: u32 = 22500 / 4; + pub VoterSnapshotPerBlock: u32 = 22500 / Pages::get(); // validator snapshot size pub TargetSnapshotPerBlock: u32 = 1000; pub SignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 4; - pub SignedValidation: u32 = 8; + pub SignedValidation: u32 = Pages::get() * 2; pub UnsignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 4; } + impl multi_block::unsigned::miner::MinerConfig for Runtime { + type AccountId = AccountId; + type Hash = Hash; + type MaxBackersPerWinner = <Self as multi_block::verifier::Config>::MaxBackersPerWinner; + type MaxBackersPerWinnerFinal = + <Self as multi_block::verifier::Config>::MaxBackersPerWinnerFinal; + type MaxWinnersPerPage = <Self as multi_block::verifier::Config>::MaxWinnersPerPage; + type MaxVotesPerVoter = + <<Self as multi_block::Config>::DataProvider as ElectionDataProvider>::MaxVotesPerVoter; + type MaxLength = MinerMaxLength; + type Solver = <Runtime as multi_block::unsigned::Config>::OffchainSolver; + type Pages = Pages; + type Solution = NposSolution16; + type VoterSnapshotPerBlock = <Runtime as multi_block::Config>::VoterSnapshotPerBlock; + type TargetSnapshotPerBlock = <Runtime as multi_block::Config>::TargetSnapshotPerBlock; + } + impl multi_block::Config for Runtime { type AdminOrigin = EnsureRoot<AccountId>; type RuntimeEvent = RuntimeEvent; @@ -939,10 +956,10 @@ pub(crate) mod multi_block_impls { // TODO: sanity check that the length of all phases is within reason. type SignedPhase = SignedPhase; type UnsignedPhase = UnsignedPhase; - type Solution = NposSolution16; type TargetSnapshotPerBlock = TargetSnapshotPerBlock; type VoterSnapshotPerBlock = VoterSnapshotPerBlock; type Verifier = MultiBlockVerifier; + type MinerConfig = Self; type WeightInfo = (); } @@ -979,8 +996,6 @@ pub(crate) mod multi_block_impls { impl multi_block::unsigned::Config for Runtime { // TODO: split into MinerConfig so the staker miner can easily configure these. // miner configs. - type MinerMaxLength = MinerMaxLength; - type MinerMaxWeight = MinerMaxWeight; type OffchainSolver = <Runtime as multi_phase::Config>::Solver; // offchain usage of miner configs @@ -1032,7 +1047,7 @@ parameter_types! 
{ pub ElectionBoundsMultiPhase: ElectionBounds = ElectionBoundsBuilder::default() .voters_count(5000.into()).targets_count(10.into()).build(); pub ElectionBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default() - .voters_count(50_000.into()).targets_count(1000.into()).build(); + .voters_count(1000.into()).targets_count(1000.into()).build(); pub MaxNominations: u32 = <NposSolution16 as frame_election_provider_support::NposSolution>::LIMIT as u32; /// The maximum winners that can be elected by the Election pallet which is equivalent to the diff --git a/substrate/frame/election-provider-multi-block/src/helpers.rs b/substrate/frame/election-provider-multi-block/src/helpers.rs index 68b514145ab..9ff694c97c7 100644 --- a/substrate/frame/election-provider-multi-block/src/helpers.rs +++ b/substrate/frame/election-provider-multi-block/src/helpers.rs @@ -19,7 +19,8 @@ use crate::{ types::{PageIndex, VoterOf}, - AllVoterPagesOf, Config, SolutionTargetIndexOf, SolutionVoterIndexOf, VoteWeight, + unsigned::miner::MinerConfig, + AllVoterPagesOf, SolutionTargetIndexOf, SolutionVoterIndexOf, VoteWeight, }; use frame_support::{traits::Get, BoundedVec}; use sp_runtime::SaturatedConversion; @@ -35,6 +36,7 @@ macro_rules! log { }; } +#[macro_export] macro_rules! sublog { ($level:tt, $sub_pallet:tt, $pattern:expr $(, $values:expr)* $(,)?) => { #[cfg(not(feature = "std"))] @@ -47,8 +49,18 @@ macro_rules! sublog { }; } +#[macro_export] +macro_rules! miner_log { + ($level:tt, $pattern:expr $(, $values:expr)* $(,)?) => { + log::$level!( + target: $crate::LOG_PREFIX, + concat!("[â›ï¸miner] 🗳🗳🗳 ", $pattern) $(, $values)* + ) + }; +} + /// Generate an `efficient closure of voters and the page in which they live in. -pub fn generate_voter_page_fn<T: Config>( +pub fn generate_voter_page_fn<T: MinerConfig>( paged_snapshot: &AllVoterPagesOf<T>, ) -> impl Fn(&T::AccountId) -> Option<PageIndex> { let mut cache: BTreeMap<T::AccountId, PageIndex> = BTreeMap::new(); @@ -73,7 +85,7 @@ pub fn generate_voter_page_fn<T: Config>( /// voters. /// /// This can be used to efficiently build index getter closures. -pub fn generate_voter_cache<T: Config, AnyBound: Get<u32>>( +pub fn generate_voter_cache<T: MinerConfig, AnyBound: Get<u32>>( snapshot: &BoundedVec<VoterOf<T>, AnyBound>, ) -> BTreeMap<T::AccountId, usize> { let mut cache: BTreeMap<T::AccountId, usize> = BTreeMap::new(); @@ -94,7 +106,7 @@ pub fn generate_voter_cache<T: Config, AnyBound: Get<u32>>( /// ## Warning /// /// Note that this will represent the snapshot data from which the `cache` is generated. -pub fn voter_index_fn<T: Config>( +pub fn voter_index_fn<T: MinerConfig>( cache: &BTreeMap<T::AccountId, usize>, ) -> impl Fn(&T::AccountId) -> Option<SolutionVoterIndexOf<T>> + '_ { move |who| { @@ -108,7 +120,7 @@ pub fn voter_index_fn<T: Config>( /// /// Same as [`voter_index_fn`] but the returned function owns all its necessary data; nothing is /// borrowed. -pub fn voter_index_fn_owned<T: Config>( +pub fn voter_index_fn_owned<T: MinerConfig>( cache: BTreeMap<T::AccountId, usize>, ) -> impl Fn(&T::AccountId) -> Option<SolutionVoterIndexOf<T>> { move |who| { @@ -123,7 +135,7 @@ pub fn voter_index_fn_owned<T: Config>( /// ## Warning /// /// Note that this will represent the snapshot data from which the `cache` is generated. 
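Side note, not part of the patch: because the helpers above are now bounded by `MinerConfig` rather than the full pallet `Config`, a miner can build its lookup closures from a paged voter snapshot alone, with no access to the pallet's storage. A minimal in-crate sketch, assuming `voter_pages: AllVoterPagesOf<T>` has been obtained elsewhere (e.g. from the `OffchainWorkerMiner::fetch_snapshot` introduced later in this patch) and that the items are imported from this crate's `helpers`, `types` and `unsigned::miner` modules; `snapshot_lookups` is an illustrative name:

fn snapshot_lookups<T: MinerConfig>(voter_pages: &AllVoterPagesOf<T>) {
	// closure mapping a voter account to the snapshot page it lives in
	let page_of = generate_voter_page_fn::<T>(voter_pages);
	// one account -> in-page-index cache per page, as consumed by `voter_index_fn`
	// and `stake_of_fn`
	let caches: sp_std::vec::Vec<_> = voter_pages
		.iter()
		.map(|page| generate_voter_cache::<T, _>(page))
		.collect();
	let _ = (page_of, caches);
}

The same closures were previously only constructible with a full `crate::Config`, which is what tied the old `BaseMiner` to the runtime.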
-pub fn voter_index_fn_usize<T: Config>( +pub fn voter_index_fn_usize<T: MinerConfig>( cache: &BTreeMap<T::AccountId, usize>, ) -> impl Fn(&T::AccountId) -> Option<usize> + '_ { move |who| cache.get(who).cloned() @@ -136,7 +148,7 @@ pub fn voter_index_fn_usize<T: Config>( /// /// Not meant to be used in production. #[cfg(test)] -pub fn voter_index_fn_linear<T: Config>( +pub fn voter_index_fn_linear<T: MinerConfig>( snapshot: &Vec<VoterOf<T>>, ) -> impl Fn(&T::AccountId) -> Option<SolutionVoterIndexOf<T>> + '_ { move |who| { @@ -154,7 +166,7 @@ pub fn voter_index_fn_linear<T: Config>( /// Note: to the extent possible, the returned function should be cached and reused. Producing that /// function requires a `O(n log n)` data transform. Each invocation of that function completes /// in `O(log n)`. -pub fn target_index_fn<T: Config>( +pub fn target_index_fn<T: MinerConfig>( snapshot: &Vec<T::AccountId>, ) -> impl Fn(&T::AccountId) -> Option<SolutionTargetIndexOf<T>> + '_ { let cache: BTreeMap<_, _> = @@ -174,7 +186,7 @@ pub fn target_index_fn<T: Config>( /// /// Not meant to be used in production. #[cfg(test)] -pub fn target_index_fn_linear<T: Config>( +pub fn target_index_fn_linear<T: MinerConfig>( snapshot: &Vec<T::AccountId>, ) -> impl Fn(&T::AccountId) -> Option<SolutionTargetIndexOf<T>> + '_ { move |who| { @@ -187,7 +199,7 @@ pub fn target_index_fn_linear<T: Config>( /// Create a function that can map a voter index ([`SolutionVoterIndexOf`]) to the actual voter /// account using a linearly indexible snapshot. -pub fn voter_at_fn<T: Config>( +pub fn voter_at_fn<T: MinerConfig>( snapshot: &Vec<VoterOf<T>>, ) -> impl Fn(SolutionVoterIndexOf<T>) -> Option<T::AccountId> + '_ { move |i| { @@ -199,7 +211,7 @@ pub fn voter_at_fn<T: Config>( /// Create a function that can map a target index ([`SolutionTargetIndexOf`]) to the actual target /// account using a linearly indexible snapshot. -pub fn target_at_fn<T: Config>( +pub fn target_at_fn<T: MinerConfig>( snapshot: &Vec<T::AccountId>, ) -> impl Fn(SolutionTargetIndexOf<T>) -> Option<T::AccountId> + '_ { move |i| { @@ -213,7 +225,7 @@ pub fn target_at_fn<T: Config>( /// /// This is not optimized and uses a linear search. #[cfg(test)] -pub fn stake_of_fn_linear<T: Config>( +pub fn stake_of_fn_linear<T: MinerConfig>( snapshot: &Vec<VoterOf<T>>, ) -> impl Fn(&T::AccountId) -> VoteWeight + '_ { move |who| { @@ -231,7 +243,7 @@ pub fn stake_of_fn_linear<T: Config>( /// /// The cache need must be derived from the same snapshot. Zero is returned if a voter is /// non-existent. 
-pub fn stake_of_fn<'a, T: Config, AnyBound: Get<u32>>( +pub fn stake_of_fn<'a, T: MinerConfig, AnyBound: Get<u32>>( snapshot: &'a BoundedVec<VoterOf<T>, AnyBound>, cache: &'a BTreeMap<T::AccountId, usize>, ) -> impl Fn(&T::AccountId) -> VoteWeight + 'a { diff --git a/substrate/frame/election-provider-multi-block/src/lib.rs b/substrate/frame/election-provider-multi-block/src/lib.rs index e736d17aaec..e86cf76089e 100644 --- a/substrate/frame/election-provider-multi-block/src/lib.rs +++ b/substrate/frame/election-provider-multi-block/src/lib.rs @@ -230,7 +230,7 @@ impl<T: Config> ElectionProvider for InitiateEmergencyPhase<T> { impl<T: Config> InstantElectionProvider for InitiateEmergencyPhase<T> { fn instant_elect( - _voters: Vec<VoterOf<T>>, + _voters: Vec<VoterOf<T::MinerConfig>>, _targets: Vec<Self::AccountId>, _desired_targets: u32, ) -> Result<BoundedSupportsOf<Self>, Self::Error> { @@ -264,7 +264,7 @@ impl<T: Config> ElectionProvider for Continue<T> { impl<T: Config> InstantElectionProvider for Continue<T> { fn instant_elect( - _voters: Vec<VoterOf<T>>, + _voters: Vec<VoterOf<T::MinerConfig>>, _targets: Vec<Self::AccountId>, _desired_targets: u32, ) -> Result<BoundedSupportsOf<Self>, Self::Error> { @@ -368,9 +368,11 @@ pub mod pallet { /// The duration of this should not be less than `T::Pages`, and there is no point in it /// being more than `SignedPhase::MaxSubmission::get() * T::Pages`. TODO: integrity test for /// it. + #[pallet::constant] type SignedValidationPhase: Get<BlockNumberFor<Self>>; /// The number of snapshot voters to fetch per block. + #[pallet::constant] type VoterSnapshotPerBlock: Get<u32>; /// The number of snapshot targets to fetch per block. @@ -392,17 +394,16 @@ pub mod pallet { BlockNumber = BlockNumberFor<Self>, >; - /// The solution type. - type Solution: codec::FullCodec - + Default - + PartialEq - + Eq - + Clone - + sp_std::fmt::Debug - + Ord - + NposSolution - + TypeInfo - + MaxEncodedLen; + /// The miner configuration. + type MinerConfig: crate::unsigned::miner::MinerConfig< + Pages = Self::Pages, + AccountId = <Self as frame_system::Config>::AccountId, + MaxVotesPerVoter = <Self::DataProvider as ElectionDataProvider>::MaxVotesPerVoter, + VoterSnapshotPerBlock = Self::VoterSnapshotPerBlock, + TargetSnapshotPerBlock = Self::TargetSnapshotPerBlock, + MaxBackersPerWinner = <Self::Verifier as verifier::Verifier>::MaxBackersPerWinner, + MaxWinnersPerPage = <Self::Verifier as verifier::Verifier>::MaxWinnersPerPage, + >; /// The fallback type used for the election. /// @@ -417,8 +418,10 @@ pub mod pallet { >; /// The verifier pallet's interface. - type Verifier: verifier::Verifier<Solution = SolutionOf<Self>, AccountId = Self::AccountId> - + verifier::AsynchronousVerifier; + type Verifier: verifier::Verifier< + Solution = SolutionOf<Self::MinerConfig>, + AccountId = Self::AccountId, + > + verifier::AsynchronousVerifier; /// The number of blocks ahead of time to try and have the election results ready by. type Lookahead: Get<BlockNumberFor<Self>>; @@ -574,13 +577,13 @@ pub mod pallet { use sp_std::mem::size_of; // The index type of both voters and targets need to be smaller than that of usize (very // unlikely to be the case, but anyhow). 
- assert!(size_of::<SolutionVoterIndexOf<T>>() <= size_of::<usize>()); - assert!(size_of::<SolutionTargetIndexOf<T>>() <= size_of::<usize>()); + assert!(size_of::<SolutionVoterIndexOf<T::MinerConfig>>() <= size_of::<usize>()); + assert!(size_of::<SolutionTargetIndexOf<T::MinerConfig>>() <= size_of::<usize>()); // also, because `VoterSnapshotPerBlock` and `TargetSnapshotPerBlock` are in u32, we // assert that both of these types are smaller than u32 as well. - assert!(size_of::<SolutionVoterIndexOf<T>>() <= size_of::<u32>()); - assert!(size_of::<SolutionTargetIndexOf<T>>() <= size_of::<u32>()); + assert!(size_of::<SolutionVoterIndexOf<T::MinerConfig>>() <= size_of::<u32>()); + assert!(size_of::<SolutionTargetIndexOf<T::MinerConfig>>() <= size_of::<u32>()); let pages_bn: BlockNumberFor<T> = T::Pages::get().into(); // pages must be at least 1. @@ -593,17 +596,18 @@ pub mod pallet { assert!(pages_bn + lookahead < T::UnsignedPhase::get()); // Based on the requirements of [`sp_npos_elections::Assignment::try_normalize`]. - let max_vote: usize = <SolutionOf<T> as NposSolution>::LIMIT; + let max_vote: usize = <SolutionOf<T::MinerConfig> as NposSolution>::LIMIT; // 2. Maximum sum of [SolutionAccuracy; 16] must fit into `UpperOf<OffchainAccuracy>`. - let maximum_chain_accuracy: Vec<UpperOf<SolutionAccuracyOf<T>>> = (0..max_vote) + let maximum_chain_accuracy: Vec<UpperOf<SolutionAccuracyOf<T::MinerConfig>>> = (0.. + max_vote) .map(|_| { - <UpperOf<SolutionAccuracyOf<T>>>::from( - <SolutionAccuracyOf<T>>::one().deconstruct(), + <UpperOf<SolutionAccuracyOf<T::MinerConfig>>>::from( + <SolutionAccuracyOf<T::MinerConfig>>::one().deconstruct(), ) }) .collect(); - let _: UpperOf<SolutionAccuracyOf<T>> = maximum_chain_accuracy + let _: UpperOf<SolutionAccuracyOf<T::MinerConfig>> = maximum_chain_accuracy .iter() .fold(Zero::zero(), |acc, x| acc.checked_add(x).unwrap()); @@ -614,7 +618,7 @@ pub mod pallet { // solution cannot represent any voters more than `LIMIT` anyhow. assert_eq!( <T::DataProvider as ElectionDataProvider>::MaxVotesPerVoter::get(), - <SolutionOf<T> as NposSolution>::LIMIT as u32, + <SolutionOf<T::MinerConfig> as NposSolution>::LIMIT as u32, ); // The duration of the signed validation phase should be such that at least one solution @@ -637,6 +641,17 @@ pub mod pallet { /// Error of the pallet that can be returned in response to dispatches. #[pallet::error] pub enum Error<T> { + /// Triggering the `Fallback` failed. + Fallback, + /// Unexpected phase + UnexpectedPhase, + /// Snapshot was unavailable. + Snapshot, + } + + /// Common errors in all sub-pallets and miner. + #[derive(PartialEq, Eq, Clone, Encode, Decode, Debug)] + pub enum CommonError { /// Submission is too early (or too late, depending on your point of reference). EarlySubmission, /// The round counter is wrong. @@ -649,28 +664,10 @@ pub mod pallet { WrongWinnerCount, /// The snapshot fingerprint is not a match. The solution is likely outdated. WrongFingerprint, - /// Triggering the `Fallback` failed. - Fallback, - /// Unexpected phase - UnexpectedPhase, - /// Snapshot was unavailable. + /// Snapshot was not available. Snapshot, } - impl<T: Config> PartialEq for Error<T> { - fn eq(&self, other: &Self) -> bool { - use Error::*; - match (self, other) { - (EarlySubmission, EarlySubmission) | - (WrongRound, WrongRound) | - (WeakSubmission, WeakSubmission) | - (WrongWinnerCount, WrongWinnerCount) | - (WrongPageCount, WrongPageCount) => true, - _ => false, - } - } - } - /// Internal counter for the number of rounds. 
/// /// This is useful for de-duplication of transactions submitted to the pool, and general @@ -729,7 +726,7 @@ pub mod pallet { PagedTargetSnapshotHash::<T>::insert(Pallet::<T>::msp(), hash); } - pub(crate) fn set_voters(page: PageIndex, voters: VoterPageOf<T>) { + pub(crate) fn set_voters(page: PageIndex, voters: VoterPageOf<T::MinerConfig>) { let hash = Self::write_storage_with_pre_allocate( &PagedVoterSnapshot::<T>::hashed_key_for(page), voters, @@ -753,7 +750,7 @@ pub mod pallet { DesiredTargets::<T>::get() } - pub(crate) fn voters(page: PageIndex) -> Option<VoterPageOf<T>> { + pub(crate) fn voters(page: PageIndex) -> Option<VoterPageOf<T::MinerConfig>> { PagedVoterSnapshot::<T>::get(page) } @@ -901,7 +898,7 @@ pub mod pallet { PagedTargetSnapshot::<T>::iter().count().saturated_into::<PageIndex>() } - pub(crate) fn voters_iter_flattened() -> impl Iterator<Item = VoterOf<T>> { + pub(crate) fn voters_iter_flattened() -> impl Iterator<Item = VoterOf<T::MinerConfig>> { let key_range = (crate::Pallet::<T>::lsp()..=crate::Pallet::<T>::msp()).collect::<Vec<_>>(); key_range @@ -943,7 +940,8 @@ pub mod pallet { type DesiredTargets<T> = StorageValue<_, u32>; /// Paginated voter snapshot. At most [`T::Pages`] keys will exist. #[pallet::storage] - type PagedVoterSnapshot<T: Config> = StorageMap<_, Twox64Concat, PageIndex, VoterPageOf<T>>; + type PagedVoterSnapshot<T: Config> = + StorageMap<_, Twox64Concat, PageIndex, VoterPageOf<T::MinerConfig>>; /// Same as [`PagedVoterSnapshot`], but it will store the hash of the snapshot. /// /// The hash is generated using [`frame_system::Config::Hashing`]. @@ -1002,34 +1000,34 @@ impl<T: Config> Pallet<T> { /// These compliment a feasibility-check, which is exactly the opposite: snapshot-dependent /// checks. pub(crate) fn snapshot_independent_checks( - paged_solution: &PagedRawSolution<T>, + paged_solution: &PagedRawSolution<T::MinerConfig>, maybe_snapshot_fingerprint: Option<T::Hash>, - ) -> Result<(), Error<T>> { + ) -> Result<(), CommonError> { // Note that the order of these checks are critical for the correctness and performance of // `restore_or_compute_then_maybe_submit`. We want to make sure that we always check round // first, so that if it has a wrong round, we can detect and delete it from the cache right // from the get go. // ensure round is current - ensure!(Self::round() == paged_solution.round, Error::<T>::WrongRound); + ensure!(Self::round() == paged_solution.round, CommonError::WrongRound); // ensure score is being improved, if the claim is even correct. ensure!( <T::Verifier as Verifier>::ensure_claimed_score_improves(paged_solution.score), - Error::<T>::WeakSubmission, + CommonError::WeakSubmission, ); // ensure solution pages are no more than the snapshot ensure!( paged_solution.solution_pages.len().saturated_into::<PageIndex>() <= T::Pages::get(), - Error::<T>::WrongPageCount + CommonError::WrongPageCount ); // finally, check the winner count being correct. 
if let Some(desired_targets) = Snapshot::<T>::desired_targets() { ensure!( desired_targets == paged_solution.winner_count_single_page_target_snapshot() as u32, - Error::<T>::WrongWinnerCount + CommonError::WrongWinnerCount ) } @@ -1038,7 +1036,7 @@ impl<T: Config> Pallet<T> { maybe_snapshot_fingerprint .map_or(true, |snapshot_fingerprint| Snapshot::<T>::fingerprint() == snapshot_fingerprint), - Error::<T>::WrongFingerprint + CommonError::WrongFingerprint ); Ok(()) @@ -1833,7 +1831,7 @@ mod phase_rotation { #[cfg(test)] mod election_provider { use super::*; - use crate::{mock::*, unsigned::miner::BaseMiner, verifier::Verifier, Phase}; + use crate::{mock::*, unsigned::miner::OffchainWorkerMiner, verifier::Verifier, Phase}; use frame_election_provider_support::{BoundedSupport, BoundedSupports, ElectionProvider}; use frame_support::{ assert_storage_noop, testing_prelude::bounded_vec, unsigned::ValidateUnsigned, @@ -1849,7 +1847,7 @@ mod election_provider { assert_eq!(MultiBlock::current_phase(), Phase::Signed); // load a solution into the verifier - let paged = BaseMiner::<Runtime>::mine_solution(Pages::get(), false).unwrap(); + let paged = OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), false).unwrap(); let score = paged.score.clone(); // now let's submit this one by one, into the signed phase. @@ -1943,7 +1941,7 @@ mod election_provider { assert_eq!(MultiBlock::current_phase(), Phase::Signed); // load a solution into the verifier - let paged = BaseMiner::<Runtime>::mine_solution(Pages::get(), false).unwrap(); + let paged = OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), false).unwrap(); let score = paged.score.clone(); load_signed_for_verification_and_start(99, paged, 0); @@ -2000,7 +1998,7 @@ mod election_provider { assert_eq!(MultiBlock::current_phase(), Phase::Signed); // load a solution into the verifier - let paged = BaseMiner::<Runtime>::mine_solution(Pages::get(), false).unwrap(); + let paged = OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), false).unwrap(); let score = paged.score.clone(); load_signed_for_verification_and_start(99, paged, 0); @@ -2070,7 +2068,7 @@ mod election_provider { let round = MultiBlock::round(); // load a solution into the verifier - let paged = BaseMiner::<Runtime>::mine_solution(Pages::get(), false).unwrap(); + let paged = OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), false).unwrap(); load_signed_for_verification_and_start_and_roll_to_verified(99, paged, 0); @@ -2113,7 +2111,7 @@ mod election_provider { assert_eq!(MultiBlock::current_phase(), Phase::Signed); // load a solution into the verifier - let paged = BaseMiner::<Runtime>::mine_solution(Pages::get(), false).unwrap(); + let paged = OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), false).unwrap(); load_signed_for_verification_and_start_and_roll_to_verified(99, paged, 0); assert_eq!(MultiBlock::current_phase(), Phase::SignedValidation(20)); @@ -2353,7 +2351,6 @@ mod admin_ops { #[cfg(test)] mod snapshot { - use super::*; #[test] fn fetches_exact_voters() { diff --git a/substrate/frame/election-provider-multi-block/src/mock/mod.rs b/substrate/frame/election-provider-multi-block/src/mock/mod.rs index f7bfa98814f..c5552b5439a 100644 --- a/substrate/frame/election-provider-multi-block/src/mock/mod.rs +++ b/substrate/frame/election-provider-multi-block/src/mock/mod.rs @@ -18,13 +18,14 @@ mod signed; mod staking; mod weight_info; + use super::*; use crate::{ self as multi_block, signed::{self as signed_pallet, HoldReason}, unsigned::{ self as 
unsigned_pallet, - miner::{BaseMiner, MinerError}, + miner::{MinerConfig, OffchainMinerError, OffchainWorkerMiner}, }, verifier::{self as verifier_pallet, AsynchronousVerifier, Status}, }; @@ -35,9 +36,7 @@ use frame_election_provider_support::{ }; pub use frame_support::{assert_noop, assert_ok}; use frame_support::{ - derive_impl, - pallet_prelude::*, - parameter_types, + derive_impl, parameter_types, traits::{fungible::InspectHold, Hooks}, weights::{constants, Weight}, }; @@ -138,7 +137,6 @@ parameter_types! { pub static MinerTxPriority: u64 = 100; pub static SolutionImprovementThreshold: Perbill = Perbill::zero(); pub static OffchainRepeat: BlockNumber = 5; - pub static MinerMaxWeight: Weight = BlockWeights::get().max_block; pub static MinerMaxLength: u32 = 256; pub static MaxVotesPerVoter: u32 = <TestNposSolution as NposSolution>::LIMIT as u32; @@ -177,14 +175,26 @@ impl crate::unsigned::WeightInfo for MockUnsignedWeightInfo { impl crate::unsigned::Config for Runtime { type OffchainRepeat = OffchainRepeat; - type MinerMaxWeight = MinerMaxWeight; - type MinerMaxLength = MinerMaxLength; type MinerTxPriority = MinerTxPriority; - type OffchainSolver = - frame_election_provider_support::SequentialPhragmen<Self::AccountId, Perbill>; + type OffchainSolver = SequentialPhragmen<Self::AccountId, Perbill>; type WeightInfo = MockUnsignedWeightInfo; } +impl MinerConfig for Runtime { + type AccountId = AccountId; + type Hash = <Runtime as frame_system::Config>::Hash; + type MaxLength = MinerMaxLength; + type Pages = Pages; + type MaxVotesPerVoter = MaxVotesPerVoter; + type Solution = TestNposSolution; + type Solver = SequentialPhragmen<AccountId, Perbill>; + type TargetSnapshotPerBlock = TargetSnapshotPerBlock; + type VoterSnapshotPerBlock = VoterSnapshotPerBlock; + type MaxBackersPerWinner = MaxBackersPerWinner; + type MaxBackersPerWinnerFinal = MaxBackersPerWinnerFinal; + type MaxWinnersPerPage = MaxWinnersPerPage; +} + impl crate::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SignedPhase = SignedPhase; @@ -195,7 +205,7 @@ impl crate::Config for Runtime { type TargetSnapshotPerBlock = TargetSnapshotPerBlock; type VoterSnapshotPerBlock = VoterSnapshotPerBlock; type Lookahead = Lookahead; - type Solution = TestNposSolution; + type MinerConfig = Self; type WeightInfo = weight_info::DualMockWeightInfo; type Verifier = VerifierPallet; type AdminOrigin = EnsureRoot<AccountId>; @@ -338,10 +348,6 @@ impl ExtBuilder { VoterSnapshotPerBlock::set(count); self } - pub(crate) fn miner_weight(self, weight: Weight) -> Self { - MinerMaxWeight::set(weight); - self - } pub(crate) fn miner_max_length(self, len: u32) -> Self { MinerMaxLength::set(len); self @@ -519,13 +525,15 @@ pub fn assert_none_snapshot() { /// changes. /// /// For testing, we never want to do reduce. -pub fn mine_full_solution() -> Result<PagedRawSolution<Runtime>, MinerError<Runtime>> { - BaseMiner::<Runtime>::mine_solution(Pages::get(), false) +pub fn mine_full_solution() -> Result<PagedRawSolution<Runtime>, OffchainMinerError<Runtime>> { + OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), false) } /// Same as [`mine_full_solution`] but with custom pages. 
-pub fn mine_solution(pages: PageIndex) -> Result<PagedRawSolution<Runtime>, MinerError<Runtime>> { - BaseMiner::<Runtime>::mine_solution(pages, false) +pub fn mine_solution( + pages: PageIndex, +) -> Result<PagedRawSolution<Runtime>, OffchainMinerError<Runtime>> { + OffchainWorkerMiner::<Runtime>::mine_solution(pages, false) } /// Assert that `count` voters exist across `pages` number of pages. diff --git a/substrate/frame/election-provider-multi-block/src/mock/signed.rs b/substrate/frame/election-provider-multi-block/src/mock/signed.rs index c59648d722b..e6e1b5cf3af 100644 --- a/substrate/frame/election-provider-multi-block/src/mock/signed.rs +++ b/substrate/frame/election-provider-multi-block/src/mock/signed.rs @@ -22,6 +22,7 @@ use crate::{ AccountId, RuntimeHoldReason, RuntimeOrigin, VerifierPallet, }, signed::{self as signed_pallet, Event as SignedEvent, Submissions}, + unsigned::miner::MinerConfig, verifier::{self, AsynchronousVerifier, SolutionDataProvider, VerificationResult, Verifier}, Event, PadSolutionPages, PagedRawSolution, Pagify, Phase, SolutionOf, }; @@ -44,7 +45,7 @@ parameter_types! { /// Useful for when you don't care too much about the signed phase. pub struct MockSignedPhase; impl SolutionDataProvider for MockSignedPhase { - type Solution = <Runtime as crate::Config>::Solution; + type Solution = <Runtime as MinerConfig>::Solution; fn get_page(page: PageIndex) -> Option<Self::Solution> { MockSignedNextSolution::get().map(|i| i.get(page as usize).cloned().unwrap_or_default()) } @@ -96,7 +97,7 @@ pub enum SignedSwitch { pub struct DualSignedPhase; impl SolutionDataProvider for DualSignedPhase { - type Solution = <Runtime as crate::Config>::Solution; + type Solution = <Runtime as MinerConfig>::Solution; fn get_page(page: PageIndex) -> Option<Self::Solution> { match SignedPhaseSwitch::get() { SignedSwitch::Mock => MockSignedNextSolution::get() @@ -166,7 +167,7 @@ pub fn load_signed_for_verification(who: AccountId, paged: PagedRawSolution<Runt pub fn load_signed_for_verification_and_start( who: AccountId, paged: PagedRawSolution<Runtime>, - round: u32, + _round: u32, ) { load_signed_for_verification(who, paged); diff --git a/substrate/frame/election-provider-multi-block/src/signed/mod.rs b/substrate/frame/election-provider-multi-block/src/signed/mod.rs index 0953957087f..bd762b3f6c5 100644 --- a/substrate/frame/election-provider-multi-block/src/signed/mod.rs +++ b/substrate/frame/election-provider-multi-block/src/signed/mod.rs @@ -40,7 +40,10 @@ //! //! **Metadata update**: imagine you mis-computed your score. -use crate::verifier::{AsynchronousVerifier, SolutionDataProvider, Status, VerificationResult}; +use crate::{ + types::SolutionOf, + verifier::{AsynchronousVerifier, SolutionDataProvider, Status, VerificationResult}, +}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_election_provider_support::PageIndex; use frame_support::{ @@ -93,7 +96,8 @@ pub struct SubmissionMetadata<T: Config> { } impl<T: Config> SolutionDataProvider for Pallet<T> { - type Solution = T::Solution; + type Solution = SolutionOf<T::MinerConfig>; + fn get_page(page: PageIndex) -> Option<Self::Solution> { // note: a non-existing page will still be treated as merely an empty page. This could be // re-considered. @@ -268,7 +272,7 @@ pub mod pallet { /// - And the value of `metadata.score` must be equal to the score stored in /// `SortedScores`. /// - And visa versa: for any key existing in `SubmissionMetadataStorage`, an item must exist in - /// `SortedScores`. TODO: + /// `SortedScores`. 
/// - For any first key existing in `SubmissionStorage`, a key must exist in /// `SubmissionMetadataStorage`. /// - For any first key in `SubmissionStorage`, the number of second keys existing should be the @@ -300,7 +304,7 @@ pub mod pallet { NMapKey<Twox64Concat, T::AccountId>, NMapKey<Twox64Concat, PageIndex>, ), - T::Solution, + SolutionOf<T::MinerConfig>, OptionQuery, >; @@ -460,7 +464,7 @@ pub mod pallet { round: u32, who: &T::AccountId, page: PageIndex, - maybe_solution: Option<T::Solution>, + maybe_solution: Option<SolutionOf<T::MinerConfig>>, ) -> DispatchResultWithPostInfo { Self::mutate_checked(round, || { Self::try_mutate_page_inner(round, who, page, maybe_solution) @@ -471,7 +475,7 @@ pub mod pallet { round: u32, who: &T::AccountId, page: PageIndex, - maybe_solution: Option<T::Solution>, + maybe_solution: Option<SolutionOf<T::MinerConfig>>, ) -> DispatchResultWithPostInfo { let mut metadata = SubmissionMetadataStorage::<T>::get(round, who).ok_or(Error::<T>::NotRegistered)?; @@ -527,7 +531,7 @@ pub mod pallet { round: u32, who: &T::AccountId, page: PageIndex, - ) -> Option<T::Solution> { + ) -> Option<SolutionOf<T::MinerConfig>> { SubmissionStorage::<T>::get((round, who, &page)) } } @@ -536,7 +540,7 @@ pub mod pallet { impl<T: Config> Submissions<T> { pub fn submissions_iter( round: u32, - ) -> impl Iterator<Item = (T::AccountId, PageIndex, T::Solution)> { + ) -> impl Iterator<Item = (T::AccountId, PageIndex, SolutionOf<T::MinerConfig>)> { SubmissionStorage::<T>::iter_prefix((round,)).map(|((x, y), z)| (x, y, z)) } @@ -553,7 +557,7 @@ pub mod pallet { pub fn pages_of( round: u32, who: T::AccountId, - ) -> impl Iterator<Item = (PageIndex, T::Solution)> { + ) -> impl Iterator<Item = (PageIndex, SolutionOf<T::MinerConfig>)> { SubmissionStorage::<T>::iter_prefix((round, who)) } @@ -700,7 +704,7 @@ pub mod pallet { pub fn submit_page( origin: OriginFor<T>, page: PageIndex, - maybe_solution: Option<T::Solution>, + maybe_solution: Option<SolutionOf<T::MinerConfig>>, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; ensure!(crate::Pallet::<T>::current_phase().is_signed(), Error::<T>::PhaseNotSigned); diff --git a/substrate/frame/election-provider-multi-block/src/types.rs b/substrate/frame/election-provider-multi-block/src/types.rs index 6a4e77843dd..2be6705c344 100644 --- a/substrate/frame/election-provider-multi-block/src/types.rs +++ b/substrate/frame/election-provider-multi-block/src/types.rs @@ -18,30 +18,19 @@ use frame_support::{ BoundedVec, CloneNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; +use sp_core::Get; use sp_std::{collections::btree_set::BTreeSet, fmt::Debug, prelude::*}; -use crate::Verifier; +use crate::unsigned::miner::MinerConfig; use codec::{Decode, Encode, MaxEncodedLen}; -use frame_election_provider_support::{BoundedSupports, ElectionProvider}; +use frame_election_provider_support::ElectionProvider; pub use frame_election_provider_support::{NposSolution, PageIndex}; use scale_info::TypeInfo; pub use sp_npos_elections::{ElectionResult, ElectionScore}; -use sp_runtime::{ - traits::{One, Saturating, Zero}, - SaturatedConversion, -}; - -/// The supports that's returned from a given [`Verifier`]. -/// TODO: rename this -pub type SupportsOf<V> = BoundedSupports< - <V as Verifier>::AccountId, - <V as Verifier>::MaxWinnersPerPage, - <V as Verifier>::MaxBackersPerWinner, ->; +use sp_runtime::SaturatedConversion; /// The solution type used by this crate. 
-pub type SolutionOf<T> = <T as crate::Config>::Solution; - +pub type SolutionOf<T> = <T as MinerConfig>::Solution; /// The voter index. Derived from [`SolutionOf`]. pub type SolutionVoterIndexOf<T> = <SolutionOf<T> as NposSolution>::VoterIndex; /// The target index. Derived from [`SolutionOf`]. @@ -53,7 +42,7 @@ pub type FallbackErrorOf<T> = <<T as crate::Config>::Fallback as ElectionProvide /// The relative distribution of a voter's stake among the winning targets. pub type AssignmentOf<T> = - sp_npos_elections::Assignment<<T as frame_system::Config>::AccountId, SolutionAccuracyOf<T>>; + sp_npos_elections::Assignment<<T as MinerConfig>::AccountId, SolutionAccuracyOf<T>>; #[derive( TypeInfo, @@ -68,8 +57,8 @@ pub type AssignmentOf<T> = )] #[codec(mel_bound(T: crate::Config))] #[scale_info(skip_type_params(T))] -pub struct PagedRawSolution<T: crate::Config> { - pub solution_pages: BoundedVec<SolutionOf<T>, T::Pages>, +pub struct PagedRawSolution<T: MinerConfig> { + pub solution_pages: BoundedVec<SolutionOf<T>, <T as MinerConfig>::Pages>, pub score: ElectionScore, pub round: u32, } @@ -132,7 +121,7 @@ impl<T: Default + Clone + Debug, Bound: frame_support::traits::Get<u32>> PadSolu } } -impl<T: crate::Config> PagedRawSolution<T> { +impl<T: MinerConfig> PagedRawSolution<T> { /// Get the total number of voters, assuming that voters in each page are unique. pub fn voter_count(&self) -> usize { self.solution_pages @@ -163,20 +152,21 @@ impl<T: crate::Config> PagedRawSolution<T> { // NOTE on naming conventions: type aliases that end with `Of` should always be `Of<T: Config>`. -/// Alias for a voter, parameterized by this crate's config. -pub(crate) type VoterOf<T> = - frame_election_provider_support::VoterOf<<T as crate::Config>::DataProvider>; +/// Alias for a voter, parameterized by the miner config. +pub(crate) type VoterOf<T> = frame_election_provider_support::Voter< + <T as MinerConfig>::AccountId, + <T as MinerConfig>::MaxVotesPerVoter, +>; /// Alias for a page of voters, parameterized by this crate's config. -pub(crate) type VoterPageOf<T> = - BoundedVec<VoterOf<T>, <T as crate::Config>::VoterSnapshotPerBlock>; +pub(crate) type VoterPageOf<T> = BoundedVec<VoterOf<T>, <T as MinerConfig>::VoterSnapshotPerBlock>; /// Alias for all pages of voters, parameterized by this crate's config. -pub(crate) type AllVoterPagesOf<T> = BoundedVec<VoterPageOf<T>, <T as crate::Config>::Pages>; +pub(crate) type AllVoterPagesOf<T> = BoundedVec<VoterPageOf<T>, <T as MinerConfig>::Pages>; /// Maximum number of items that [`AllVoterPagesOf`] can contain, when flattened. -pub(crate) struct MaxFlattenedVoters<T: crate::Config>(sp_std::marker::PhantomData<T>); -impl<T: crate::Config> frame_support::traits::Get<u32> for MaxFlattenedVoters<T> { +pub(crate) struct MaxFlattenedVoters<T: MinerConfig>(sp_std::marker::PhantomData<T>); +impl<T: MinerConfig> Get<u32> for MaxFlattenedVoters<T> { fn get() -> u32 { T::VoterSnapshotPerBlock::get().saturating_mul(T::Pages::get()) } @@ -223,53 +213,6 @@ impl Default for ElectionCompute { } } -// TODO: maybe use it, else remove it. 
-#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, MaxEncodedLen, Debug, TypeInfo)] -pub enum PhaseExperimental<BlockNumber> { - Off, - Snapshot(BlockNumber), - Signed(BlockNumber), - SignedValidation(BlockNumber), - Unsigned(BlockNumber), - Emergency, -} - -impl<BlockNumber: Saturating + Zero + One> PhaseExperimental<BlockNumber> { - pub fn tick(self, next_phase_len: BlockNumber) -> Self { - use PhaseExperimental::*; - match self { - Off => Snapshot(next_phase_len), - Snapshot(x) => - if x.is_zero() { - Signed(next_phase_len) - } else { - Snapshot(x.saturating_sub(One::one())) - }, - Signed(x) => - if x.is_zero() { - SignedValidation(next_phase_len) - } else { - Signed(x.saturating_sub(One::one())) - }, - SignedValidation(x) => - if x.is_zero() { - Unsigned(next_phase_len) - } else { - SignedValidation(x.saturating_sub(One::one())) - }, - - Unsigned(x) => - if x.is_zero() { - // note this: unsigned phase does not really end, only elect can end it. - Unsigned(Zero::zero()) - } else { - Unsigned(x.saturating_sub(One::one())) - }, - Emergency => Emergency, - } - } -} - /// Current phase of the pallet. #[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, MaxEncodedLen, Debug, TypeInfo)] pub enum Phase<Bn> { diff --git a/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs b/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs index b06482b1ba8..4f4ecc27078 100644 --- a/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs +++ b/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs @@ -15,21 +15,24 @@ // See the License for the specific language governing permissions and // limitations under the License. -use super::{Call, Config, Pallet, WeightInfo}; +use super::{Call, Config, Pallet}; use crate::{ helpers, - types::*, + types::{PadSolutionPages, *}, verifier::{self}, + CommonError, }; use codec::Encode; use frame_election_provider_support::{ExtendedBalance, NposSolver, Support, VoteWeight}; use frame_support::{traits::Get, BoundedVec}; use frame_system::pallet_prelude::*; +use scale_info::TypeInfo; +use sp_npos_elections::EvaluateSupport; use sp_runtime::{ offchain::storage::{MutateStorageError, StorageValueRef}, traits::{SaturatedConversion, Saturating, Zero}, }; -use sp_std::prelude::*; +use sp_std::{collections::btree_map::BTreeMap, prelude::*}; // TODO: fuzzer for the miner @@ -49,21 +52,21 @@ pub enum SnapshotType { } /// Error type of the pallet's [`crate::Config::Solver`]. -pub type OffchainSolverErrorOf<T> = <<T as Config>::OffchainSolver as NposSolver>::Error; +pub type MinerSolverErrorOf<T> = <<T as MinerConfig>::Solver as NposSolver>::Error; /// The errors related to the [`BaseMiner`]. #[derive( frame_support::DebugNoBound, frame_support::EqNoBound, frame_support::PartialEqNoBound, )] -pub enum MinerError<T: Config> { +pub enum MinerError<T: MinerConfig> { /// An internal error in the NPoS elections crate. NposElections(sp_npos_elections::Error), /// An internal error in the generic solver. - Solver(OffchainSolverErrorOf<T>), + Solver(MinerSolverErrorOf<T>), /// Snapshot data was unavailable unexpectedly. SnapshotUnAvailable(SnapshotType), - /// The snapshot-independent checks failed for the mined solution. - SnapshotIndependentChecks(crate::Error<T>), + /// The base, common errors from the pallet. + Common(CommonError), /// The solution generated from the miner is not feasible. Feasibility(verifier::FeasibilityError), /// Some page index has been invalid. 
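For orientation, commentary that is not part of the patch: after this split, a miner written purely against `MinerConfig` reports `MinerError<T: MinerConfig>`, the shared snapshot-independent failures travel as `CommonError`, and the offchain worker wraps both in `OffchainMinerError`. A rough sketch of how a caller could fold these together, using only the variants and `From` impls shown above (the helper name `describe` is illustrative):

fn describe<T: MinerConfig>(err: MinerError<T>) -> &'static str {
	match err {
		// shared, snapshot-independent failures, also used by the main pallet's checks
		MinerError::Common(CommonError::WrongRound) => "solution is for an old round; re-mine",
		MinerError::Common(CommonError::WeakSubmission) => "a better solution is already queued",
		MinerError::Common(_) => "failed another snapshot-independent check",
		// miner-local failures
		MinerError::SnapshotUnAvailable(_) => "a snapshot page was missing; retry later",
		MinerError::Feasibility(_) => "mined solution does not pass feasibility",
		_ => "solver or internal error",
	}
}

An `OffchainMinerError::BaseMiner(e)` can be unwrapped and routed through the same function, while the unsigned-specific pre-checks surface as `OffchainMinerError::Common`.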
@@ -74,28 +77,33 @@ pub enum MinerError<T: Config> { Defensive(&'static str), } -impl<T: Config> From<sp_npos_elections::Error> for MinerError<T> { +impl<T: MinerConfig> From<sp_npos_elections::Error> for MinerError<T> { fn from(e: sp_npos_elections::Error) -> Self { MinerError::NposElections(e) } } -impl<T: Config> From<crate::verifier::FeasibilityError> for MinerError<T> { +impl<T: MinerConfig> From<verifier::FeasibilityError> for MinerError<T> { fn from(e: verifier::FeasibilityError) -> Self { MinerError::Feasibility(e) } } +impl<T: MinerConfig> From<CommonError> for MinerError<T> { + fn from(e: CommonError) -> Self { + MinerError::Common(e) + } +} + /// The errors related to the [`OffchainMiner`]. #[derive( frame_support::DebugNoBound, frame_support::EqNoBound, frame_support::PartialEqNoBound, )] pub enum OffchainMinerError<T: Config> { /// An error in the base miner. - BaseMiner(MinerError<T>), - /// unsigned-specific checks failed. - // NOTE: This uses the error type of the parent crate for now. Might be reworked. - UnsignedChecks(crate::Error<T>), + BaseMiner(MinerError<T::MinerConfig>), + /// The base, common errors from the pallet. + Common(CommonError), /// Something went wrong fetching the lock. Lock(&'static str), /// Submitting a transaction to the pool failed. @@ -108,25 +116,120 @@ pub enum OffchainMinerError<T: Config> { FailedToStoreSolution, } -impl<T: Config> From<MinerError<T>> for OffchainMinerError<T> { - fn from(e: MinerError<T>) -> Self { +impl<T: Config> From<MinerError<T::MinerConfig>> for OffchainMinerError<T> { + fn from(e: MinerError<T::MinerConfig>) -> Self { OffchainMinerError::BaseMiner(e) } } +impl<T: Config> From<CommonError> for OffchainMinerError<T> { + fn from(e: CommonError) -> Self { + OffchainMinerError::Common(e) + } +} + +/// Configurations for the miner. +/// +/// This is extracted from the main crate's config so that an offchain miner can readily use the +/// [`BaseMiner`] without needing to deal with the rest of the pallet's configuration. +pub trait MinerConfig { + /// The account id type. + type AccountId: Ord + Clone + codec::Codec + core::fmt::Debug; + /// The solution that the miner is mining. + /// The solution type. + type Solution: codec::FullCodec + + Default + + PartialEq + + Eq + + Clone + + sp_std::fmt::Debug + + Ord + + NposSolution + + TypeInfo + + codec::MaxEncodedLen; + /// The solver type. + type Solver: NposSolver<AccountId = Self::AccountId>; + /// The maximum length that the miner should use for a solution, per page. + type MaxLength: Get<u32>; + /// Maximum number of votes per voter. + /// + /// Must be the same as configured in the [`crate::Config::DataProvider`]. + type MaxVotesPerVoter: Get<u32>; + /// Maximum number of winners to select per page. + /// + /// The miner should respect this, it is used for trimming, and bounded data types. + /// + /// Should equal to the onchain value set in `Verifier::Config`. + type MaxWinnersPerPage: Get<u32>; + /// Maximum number of backers per winner, per page. + /// + /// The miner should respect this, it is used for trimming, and bounded data types. + /// + /// Should equal to the onchain value set in `Verifier::Config`. + type MaxBackersPerWinner: Get<u32>; + /// Maximum number of backers, per winner, across all pages. + /// + /// The miner should respect this, it is used for trimming, and bounded data types. + /// + /// Should equal to the onchain value set in `Verifier::Config`. + type MaxBackersPerWinnerFinal: Get<u32>; + /// Maximum number of backers, per winner, per page. 
+ + /// Maximum number of pages that we may compute. + /// + /// Must be the same as configured in the [`crate::Config`]. + type Pages: Get<u32>; + /// Maximum number of voters per snapshot page. + /// + /// Must be the same as configured in the [`crate::Config`]. + type VoterSnapshotPerBlock: Get<u32>; + /// Maximum number of targets per snapshot page. + /// + /// Must be the same as configured in the [`crate::Config`]. + type TargetSnapshotPerBlock: Get<u32>; + /// The hash type of the runtime. + type Hash: Eq + PartialEq; +} + /// A base miner that is only capable of mining a new solution and checking it against the state of /// this pallet for feasibility, and trimming its length/weight. /// /// The type of solver is generic and can be provided, as long as it has the same error and account /// id type as the [`crate::Config::OffchainSolver`]. The default is whatever is fed to /// [`crate::unsigned::Config::OffchainSolver`]. -pub struct BaseMiner<T: Config, Solver = <T as Config>::OffchainSolver>( - sp_std::marker::PhantomData<(T, Solver)>, -); +pub struct BaseMiner<T: MinerConfig>(sp_std::marker::PhantomData<T>); + +/// Parameterized `BoundedSupports` for the miner. +pub type SupportsOfMiner<T> = frame_election_provider_support::BoundedSupports< + <T as MinerConfig>::AccountId, + <T as MinerConfig>::MaxWinnersPerPage, + <T as MinerConfig>::MaxBackersPerWinner, +>; + +/// Aggregator for inputs to [`BaseMiner`]. +pub struct MineInput<T: MinerConfig> { + /// Number of winners to pick. + pub desired_targets: u32, + /// All of the targets. + pub all_targets: BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>, + /// Paginated list of voters. + /// + /// Note for staking-miners: How this is calculated is rather delicate, and the order of the + /// nested vectors matters. See carefully how [`OffchainWorkerMiner::mine_solution`] is doing + /// this. + pub voter_pages: AllVoterPagesOf<T>, + /// Number of pages to mine. + /// + /// Note for staking-miner: Always use [`MinerConfig::Pages`] unless explicitly wanted + /// otherwise. + pub pages: PageIndex, + /// Whether to reduce the solution. Almost always `true`. + pub do_reduce: bool, + /// The current round for which the solution is being calculated. + pub round: u32, +} -impl<T: Config, Solver: NposSolver<AccountId = T::AccountId, Error = OffchainSolverErrorOf<T>>> - BaseMiner<T, Solver> -{ +impl<T: MinerConfig> BaseMiner<T> { /// Mine a new npos solution, with the given number of pages. /// /// This miner is only capable of mining a solution that either uses all of the pages of the /// snapshot, or the most significant `pages` of it. /// /// This always trims the solution to match a few parameters: /// - /// 1. [`crate::verifier::Config::MaxBackersPerWinner`] - /// 2. [`crate::unsigned::Config::MinerMaxLength`] - /// 3. [`crate::unsigned::Config::MinerMaxWeight`] + /// [`MinerConfig::MaxWinnersPerPage`], [`MinerConfig::MaxBackersPerWinner`], + /// [`MinerConfig::MaxBackersPerWinnerFinal`] and [`MinerConfig::MaxLength`]. /// /// The order of pages returned is aligned with the snapshot. For example, the index 0 of the /// returning solution pages corresponds to the page 0 of the snapshot. /// /// The only difference is, if the solution is partial, then [`Pagify`] must be used to properly /// pad the results.
pub fn mine_solution( - mut pages: PageIndex, - do_reduce: bool, + MineInput { desired_targets, all_targets, voter_pages, mut pages, do_reduce, round }: MineInput< + T, + >, ) -> Result<PagedRawSolution<T>, MinerError<T>> { pages = pages.min(T::Pages::get()); - // read the appropriate snapshot pages. - let desired_targets = crate::Snapshot::<T>::desired_targets() - .ok_or(MinerError::SnapshotUnAvailable(SnapshotType::DesiredTargets))?; - let all_targets = crate::Snapshot::<T>::targets() - .ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Targets))?; - - // This is the range of voters that we are interested in. Mind the second `.rev`, it is - // super critical. - let voter_pages_range = (crate::Pallet::<T>::lsp()..crate::Pallet::<T>::msp() + 1) - .rev() - .take(pages as usize) - .rev(); - - sublog!( - debug, - "unsigned::base-miner", - "mining a solution with {} pages, voter snapshot range will be: {:?}", - pages, - voter_pages_range.clone().collect::<Vec<_>>() - ); - - // NOTE: if `pages (2) < T::Pages (3)`, at this point this vector will have length 2, with a - // layout of `[snapshot(1), snapshot(2)]`, namely the two most significant pages of the - // snapshot. - let voter_pages: BoundedVec<_, T::Pages> = - voter_pages_range - .map(|p| { - crate::Snapshot::<T>::voters(p) - .ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Voters(p))) - }) - .collect::<Result<Vec<_>, _>>()? - .try_into() - .expect("`voter_pages_range` has `.take(pages)`; it must have length less than pages; it must convert to `BoundedVec`; qed"); - // we also build this closure early, so we can let `targets` be consumed. let voter_page_fn = helpers::generate_voter_page_fn::<T>(&voter_pages); let target_index_fn = helpers::target_index_fn::<T>(&all_targets); @@ -196,7 +265,7 @@ impl<T: Config, Solver: NposSolver<AccountId = T::AccountId, Error = OffchainSol .try_into() .expect("Flattening the voters into `AllVoterPagesFlattenedOf` cannot fail; qed"); - let ElectionResult { winners: _, assignments } = Solver::solve( + let ElectionResult { winners: _, assignments } = T::Solver::solve( desired_targets as usize, all_targets.clone().to_vec(), all_voters.clone().into_inner(), @@ -232,29 +301,30 @@ impl<T: Config, Solver: NposSolver<AccountId = T::AccountId, Error = OffchainSol }; // 2. trim the supports by backing. - let (_pre_score, trim_support_count, final_trimmed_assignments) = { + let (_pre_score, final_trimmed_assignments, winners_removed, backers_removed) = { // these supports could very well be invalid for SCORE purposes. The reason is that // you might trim out half of an account's stake, but we don't look for this // account's other votes to fix it. 
- let mut supports_invalid_score = to_supports(&staked); + let supports_invalid_score = to_supports(&staked); let pre_score = (&supports_invalid_score).evaluate(); - let num_trimmed = Self::trim_supports(&mut supports_invalid_score); + let (bounded_invalid_score, winners_removed, backers_removed) = + SupportsOfMiner::<T>::sorted_truncate_from(supports_invalid_score); // now recreated the staked assignments - let staked = supports_to_staked_assignment(supports_invalid_score); + let staked = supports_to_staked_assignment(bounded_invalid_score.into()); let assignments = assignment_staked_to_ratio_normalized(staked) .map_err::<MinerError<T>, _>(Into::into)?; - (pre_score, num_trimmed, assignments) + (pre_score, assignments, winners_removed, backers_removed) }; - sublog!( + miner_log!( debug, - "unsigned::base-miner", - "initial score = {:?}, reduced {} edges, trimmed {} supports", + "initial score = {:?}, reduced {} edges, trimmed {} winners from supports, trimmed {} backers from support", _pre_score, reduced_count, - trim_support_count, + winners_removed, + backers_removed, ); final_trimmed_assignments @@ -306,27 +376,22 @@ impl<T: Config, Solver: NposSolver<AccountId = T::AccountId, Error = OffchainSol let solution_pages = solution_pages_unbounded .try_into() .expect("maybe_trim_weight_and_len cannot increase the length of its input; qed."); - sublog!( - debug, - "unsigned::base-miner", - "trimmed {} voters due to length/weight restriction.", - _trim_length_weight - ); + miner_log!(debug, "trimmed {} voters due to length restriction.", _trim_length_weight); // finally, wrap everything up. Assign a fake score here, since we might need to re-compute // it. - let round = crate::Pallet::<T>::round(); let mut paged = PagedRawSolution { round, solution_pages, score: Default::default() }; // OPTIMIZATION: we do feasibility_check inside `compute_score`, and once later // pre_dispatch. I think it is fine, but maybe we can improve it. - let score = Self::compute_score(&paged).map_err::<MinerError<T>, _>(Into::into)?; + let score = Self::compute_score(&paged, &voter_pages, &all_targets, desired_targets) + .map_err::<MinerError<T>, _>(Into::into)?; paged.score = score.clone(); - sublog!( + miner_log!( info, - "unsigned::base-miner", - "mined a solution with score {:?}, {} winners, {} voters, {} edges, and {} bytes", + "mined a solution with {} pages, score {:?}, {} winners, {} voters, {} edges, and {} bytes", + pages, score, paged.winner_count_single_page_target_snapshot(), paged.voter_count(), @@ -337,82 +402,60 @@ impl<T: Config, Solver: NposSolver<AccountId = T::AccountId, Error = OffchainSol Ok(paged) } - /// Mine a new solution. Performs the feasibility checks on it as well. - pub fn mine_checked_solution( - pages: PageIndex, - reduce: bool, - ) -> Result<PagedRawSolution<T>, MinerError<T>> { - let paged_solution = Self::mine_solution(pages, reduce)?; - let _ = Self::check_solution(&paged_solution, None, true, "mined")?; - Ok(paged_solution) - } - - /// Check the solution, from the perspective of the base miner: - /// - /// 1. snapshot-independent checks. - /// - with the fingerprint check being an optional step fo that. - /// 2. optionally, feasibility check. - /// - /// In most cases, you should always use this either with `do_feasibility = true` or - /// `maybe_snapshot_fingerprint.is_some()`. Doing both could be an overkill. The snapshot - /// staying constant (which can be checked via the hash) is a string guarantee that the - /// feasibility still holds. 
- pub fn check_solution( - paged_solution: &PagedRawSolution<T>, - maybe_snapshot_fingerprint: Option<T::Hash>, - do_feasibility: bool, - solution_type: &str, - ) -> Result<(), MinerError<T>> { - let _ = crate::Pallet::<T>::snapshot_independent_checks( - paged_solution, - maybe_snapshot_fingerprint, - ) - .map_err(|pe| MinerError::SnapshotIndependentChecks(pe))?; - - if do_feasibility { - let _ = Self::check_feasibility(&paged_solution, solution_type)?; - } - - Ok(()) - } - /// perform the feasibility check on all pages of a solution, returning `Ok(())` if all good and /// the corresponding error otherwise. pub fn check_feasibility( paged_solution: &PagedRawSolution<T>, + paged_voters: &AllVoterPagesOf<T>, + snapshot_targets: &BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>, + desired_targets: u32, solution_type: &str, - ) -> Result<Vec<SupportsOf<T::Verifier>>, MinerError<T>> { + ) -> Result<Vec<SupportsOfMiner<T>>, MinerError<T>> { // check every solution page for feasibility. + let padded_voters = paged_voters.clone().pad_solution_pages(T::Pages::get()); paged_solution .solution_pages .pagify(T::Pages::get()) .map(|(page_index, page_solution)| { - <T::Verifier as verifier::Verifier>::feasibility_check_page( + verifier::feasibility_check_page_inner_with_snapshot::<T>( page_solution.clone(), - page_index as PageIndex, + &padded_voters[page_index as usize], + snapshot_targets, + desired_targets, ) }) .collect::<Result<Vec<_>, _>>() .map_err(|err| { - sublog!( + miner_log!( warn, - "unsigned::base-miner", "feasibility check failed for {} solution at: {:?}", solution_type, err ); MinerError::from(err) }) + .and_then(|supports| { + // TODO: Check `MaxBackersPerWinnerFinal` + Ok(supports) + }) } /// Take the given raw paged solution and compute its score. This will replicate what the chain /// would do as closely as possible, and expects all the corresponding snapshot data to be /// available. - fn compute_score(paged_solution: &PagedRawSolution<T>) -> Result<ElectionScore, MinerError<T>> { - use sp_npos_elections::EvaluateSupport; - use sp_std::collections::btree_map::BTreeMap; - - let all_supports = Self::check_feasibility(paged_solution, "mined")?; + fn compute_score( + paged_solution: &PagedRawSolution<T>, + paged_voters: &AllVoterPagesOf<T>, + all_targets: &BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>, + desired_targets: u32, + ) -> Result<ElectionScore, MinerError<T>> { + let all_supports = Self::check_feasibility( + paged_solution, + paged_voters, + all_targets, + desired_targets, + "mined", + )?; let mut total_backings: BTreeMap<T::AccountId, ExtendedBalance> = BTreeMap::new(); all_supports.into_iter().map(|x| x.0).flatten().for_each(|(who, support)| { let backing = total_backings.entry(who).or_default(); @@ -436,7 +479,7 @@ impl<T: Config, Solver: NposSolver<AccountId = T::AccountId, Error = OffchainSol /// /// Returns the count of supports trimmed. 
pub fn trim_supports(supports: &mut sp_npos_elections::Supports<T::AccountId>) -> u32 { - let limit = <T::Verifier as crate::verifier::Verifier>::MaxBackersPerWinner::get() as usize; + let limit = T::MaxBackersPerWinner::get() as usize; let mut count = 0; supports .iter_mut() @@ -479,54 +522,13 @@ impl<T: Config, Solver: NposSolver<AccountId = T::AccountId, Error = OffchainSol paged_voters: &AllVoterPagesOf<T>, ) -> Result<u32, MinerError<T>> { debug_assert_eq!(solution_pages.len(), paged_voters.len()); - let size_limit = T::MinerMaxLength::get(); - let weight_limit = T::MinerMaxWeight::get(); - - let all_voters_count = crate::Snapshot::<T>::voters_decode_len(crate::Pallet::<T>::msp()) - .ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Voters( - crate::Pallet::<T>::msp(), - )))? as u32; - let all_targets_count = crate::Snapshot::<T>::targets_decode_len() - .ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Targets))? - as u32; - let desired_targets = crate::Snapshot::<T>::desired_targets() - .ok_or(MinerError::SnapshotUnAvailable(SnapshotType::DesiredTargets))?; - - let winner_count_of = |solution_pages: &Vec<SolutionOf<T>>| { - solution_pages - .iter() - .map(|page| page.unique_targets()) - .flatten() - .collect::<sp_std::collections::btree_set::BTreeSet<_>>() - .len() as u32 - }; - - let voter_count_of = |solution_pages: &Vec<SolutionOf<T>>| { - solution_pages - .iter() - .map(|page| page.voter_count()) - .fold(0, |acc, x| acc.saturating_add(x)) as u32 - }; + let size_limit = T::MaxLength::get(); let needs_any_trim = |solution_pages: &mut Vec<SolutionOf<T>>| { let size = solution_pages.encoded_size() as u32; - - let next_active_targets = winner_count_of(solution_pages); - if next_active_targets < desired_targets { - sublog!(warn, "unsigned::base-miner", "trimming has cause a solution to have less targets than desired, this might fail feasibility"); - } - - let weight = <T as Config>::WeightInfo::submit_unsigned( - all_voters_count, - all_targets_count, - // NOTE: we could not re-compute this all the time and instead assume that in each - // round, it is the previous value minus one. - voter_count_of(solution_pages), - next_active_targets, - ); - let needs_weight_trim = weight.any_gt(weight_limit); let needs_len_trim = size > size_limit; - + // a reminder that we used to have weight trimming here, but not more! + let needs_weight_trim = false; needs_weight_trim || needs_len_trim }; @@ -573,9 +575,8 @@ impl<T: Config, Solver: NposSolver<AccountId = T::AccountId, Error = OffchainSol let stake_of_fn = current_trimming_page_stake_of(current_trimming_page); page.remove_weakest_sorted(&stake_of_fn) }) { - sublog!( + miner_log!( trace, - "unsigned::base-miner", "removed voter at index {:?} of (un-pagified) page {} as the weakest due to weight/length limits.", removed_idx, current_trimming_page @@ -584,9 +585,8 @@ impl<T: Config, Solver: NposSolver<AccountId = T::AccountId, Error = OffchainSol removed.saturating_inc(); } else { // this page cannot support remove anymore. Try and go to the next page. 
- sublog!( + miner_log!( debug, - "unsigned::base-miner", "page {} seems to be fully empty now, moving to the next one", current_trimming_page ); @@ -595,9 +595,8 @@ impl<T: Config, Solver: NposSolver<AccountId = T::AccountId, Error = OffchainSol current_trimming_page = next_page; sort_current_trimming_page(current_trimming_page, solution_pages); } else { - sublog!( + miner_log!( warn, - "unsigned::base-miner", "no more pages to trim from at page {}, already trimmed", current_trimming_page ); @@ -611,6 +610,8 @@ impl<T: Config, Solver: NposSolver<AccountId = T::AccountId, Error = OffchainSol } /// A miner that is suited to work inside offchain worker environment. +/// +/// This is parameterized by [`Config`], rather than [`MinerConfig`]. pub(crate) struct OffchainWorkerMiner<T: Config>(sp_std::marker::PhantomData<T>); impl<T: Config> OffchainWorkerMiner<T> { @@ -623,6 +624,68 @@ impl<T: Config> OffchainWorkerMiner<T> { /// The number of pages that the offchain worker miner will try and mine. const MINING_PAGES: PageIndex = 1; + pub(crate) fn fetch_snapshot( + pages: PageIndex, + ) -> Result< + (AllVoterPagesOf<T::MinerConfig>, BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>, u32), + OffchainMinerError<T>, + > { + // read the appropriate snapshot pages. + let desired_targets = crate::Snapshot::<T>::desired_targets() + .ok_or(MinerError::SnapshotUnAvailable(SnapshotType::DesiredTargets))?; + let all_targets = crate::Snapshot::<T>::targets() + .ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Targets))?; + + // This is the range of voters that we are interested in. Mind the second `.rev`, it is + // super critical. + let voter_pages_range = (crate::Pallet::<T>::lsp()..crate::Pallet::<T>::msp() + 1) + .rev() + .take(pages as usize) + .rev(); + + sublog!( + debug, + "unsigned::base-miner", + "mining a solution with {} pages, voter snapshot range will be: {:?}", + pages, + voter_pages_range.clone().collect::<Vec<_>>() + ); + + // NOTE: if `pages (2) < T::Pages (3)`, at this point this vector will have length 2, + // with a layout of `[snapshot(1), snapshot(2)]`, namely the two most significant pages + // of the snapshot. + let voter_pages: BoundedVec<_, T::Pages> = voter_pages_range + .map(|p| { + crate::Snapshot::<T>::voters(p) + .ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Voters(p))) + }) + .collect::<Result<Vec<_>, _>>()? + .try_into() + .expect( + "`voter_pages_range` has `.take(pages)`; it must have length less than pages; it + must convert to `BoundedVec`; qed", + ); + + Ok((voter_pages, all_targets, desired_targets)) + } + + pub(crate) fn mine_solution( + pages: PageIndex, + do_reduce: bool, + ) -> Result<PagedRawSolution<T::MinerConfig>, OffchainMinerError<T>> { + let (voter_pages, all_targets, desired_targets) = Self::fetch_snapshot(pages)?; + let round = crate::Pallet::<T>::round(); + BaseMiner::<T::MinerConfig>::mine_solution(MineInput { + desired_targets, + all_targets, + voter_pages, + pages, + do_reduce, + round, + }) + .map_err(Into::into) + } + /// Get a checked solution from the base miner, ensure unsigned-specific checks also pass, then /// return an submittable call. fn mine_checked_call() -> Result<Call<T>, OffchainMinerError<T>> { @@ -631,9 +694,8 @@ impl<T: Config> OffchainWorkerMiner<T> { // NOTE: we don't run any checks in the base miner, and run all of them via // `Self::full_checks`. 
- let paged_solution =
- BaseMiner::<T, T::OffchainSolver>::mine_solution(Self::MINING_PAGES, reduce)
- .map_err::<OffchainMinerError<T>, _>(Into::into)?;
+ let paged_solution = Self::mine_solution(Self::MINING_PAGES, reduce)
+ .map_err::<OffchainMinerError<T>, _>(Into::into)?;

 // check the call fully, no fingerprinting.
 let _ = Self::check_solution(&paged_solution, None, true, "mined")?;
@@ -660,23 +722,19 @@ impl<T: Config> OffchainWorkerMiner<T> {
 /// 2. snapshot-independent checks.
 /// 1. optionally, snapshot fingerprint.
 pub fn check_solution(
- paged_solution: &PagedRawSolution<T>,
+ paged_solution: &PagedRawSolution<T::MinerConfig>,
 maybe_snapshot_fingerprint: Option<T::Hash>,
 do_feasibility: bool,
 solution_type: &str,
 ) -> Result<(), OffchainMinerError<T>> {
 // NOTE: we prefer cheap checks first, so first run unsigned checks.
- Pallet::unsigned_specific_checks(paged_solution)
- .map_err(|pe| OffchainMinerError::UnsignedChecks(pe))
- .and_then(|_| {
- BaseMiner::<T, T::OffchainSolver>::check_solution(
- paged_solution,
- maybe_snapshot_fingerprint,
- do_feasibility,
- solution_type,
- )
- .map_err(OffchainMinerError::BaseMiner)
- })
+ Pallet::<T>::unsigned_specific_checks(paged_solution)?;
+ Self::base_check_solution(
+ paged_solution,
+ maybe_snapshot_fingerprint,
+ do_feasibility,
+ solution_type,
+ )
 }

 fn submit_call(call: Call<T>) -> Result<(), OffchainMinerError<T>> {
@@ -697,6 +755,45 @@ impl<T: Config> OffchainWorkerMiner<T> {
 .map_err(|_| OffchainMinerError::PoolSubmissionFailed)
 }

+ /// Check the solution, from the perspective of the base miner:
+ ///
+ /// 1. snapshot-independent checks.
+ /// - with the fingerprint check being an optional step of that.
+ /// 2. optionally, feasibility check.
+ ///
+ /// In most cases, you should use this either with `do_feasibility = true` or
+ /// `maybe_snapshot_fingerprint.is_some()`. Doing both could be overkill. The snapshot
+ /// staying constant (which can be checked via the hash) is a strong guarantee that the
+ /// feasibility still holds.
+ ///
+ /// The difference between this and [`Self::check_solution`] is that this does not run unsigned
+ /// specific checks.
+ pub(crate) fn base_check_solution(
+ paged_solution: &PagedRawSolution<T::MinerConfig>,
+ maybe_snapshot_fingerprint: Option<T::Hash>,
+ do_feasibility: bool,
+ solution_type: &str, // TODO: remove
+ ) -> Result<(), OffchainMinerError<T>> {
+ let _ = crate::Pallet::<T>::snapshot_independent_checks(
+ paged_solution,
+ maybe_snapshot_fingerprint,
+ )?;
+
+ if do_feasibility {
+ let (voter_pages, all_targets, desired_targets) =
+ Self::fetch_snapshot(paged_solution.solution_pages.len() as PageIndex)?;
+ let _ = BaseMiner::<T::MinerConfig>::check_feasibility(
+ &paged_solution,
+ &voter_pages,
+ &all_targets,
+ desired_targets,
+ solution_type,
+ )?;
+ }
+
+ Ok(())
+ }
+
 /// Attempt to restore a solution from cache. Otherwise, compute it fresh. Either way,
 /// submit if our call's score is greater than that of the cached solution.
 pub fn restore_or_compute_then_maybe_submit() -> Result<(), OffchainMinerError<T>> {
@@ -723,17 +820,17 @@ impl<T: Config> OffchainWorkerMiner<T> {
 }
 })
 .or_else::<OffchainMinerError<T>, _>(|error| {
- use MinerError::*;
- use OffchainMinerError::*;
-
+ use OffchainMinerError as OE;
+ use MinerError as ME;
+ use CommonError as CE;
 match error {
- NoStoredSolution => {
+ OE::NoStoredSolution => {
 // IFF, not present regenerate.
let call = Self::mine_checked_call()?; Self::save_solution(&call, crate::Snapshot::<T>::fingerprint())?; Ok(call) }, - UnsignedChecks(ref e) => { + OE::Common(ref e) => { sublog!( error, "unsigned::ocw-miner", @@ -743,9 +840,9 @@ impl<T: Config> OffchainWorkerMiner<T> { Self::clear_offchain_solution_cache(); Err(error) }, - BaseMiner(Feasibility(_)) - | BaseMiner(SnapshotIndependentChecks(crate::Error::<T>::WrongRound)) - | BaseMiner(SnapshotIndependentChecks(crate::Error::<T>::WrongFingerprint)) + OE::BaseMiner(ME::Feasibility(_)) + | OE::BaseMiner(ME::Common(CE::WrongRound)) + | OE::BaseMiner(ME::Common(CE::WrongFingerprint)) => { // note that failing `Feasibility` can only mean that the solution was // computed over a snapshot that has changed due to a fork. @@ -865,211 +962,8 @@ mod trim_weight_length { use super::*; use crate::{mock::*, verifier::Verifier}; use frame_election_provider_support::TryFromUnboundedPagedSupports; - use frame_support::pallet_prelude::*; use sp_npos_elections::Support; - #[test] - fn trim_weight_basic() { - // This is just demonstration to show the normal election result with new votes, without any - // trimming. - ExtBuilder::unsigned().build_and_execute(|| { - let mut current_voters = Voters::get(); - current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who); - Voters::set(current_voters); - - roll_to_snapshot_created(); - ensure_voters(3, 12); - - let solution = mine_full_solution().unwrap(); - - // 4 of these will be trimmed. - assert_eq!( - solution.solution_pages.iter().map(|page| page.voter_count()).sum::<usize>(), - 8 - ); - - load_mock_signed_and_start(solution); - let supports = roll_to_full_verification(); - - // NOTE: this test is a bit funny because our msp snapshot page actually contains voters - // with less stake than lsp.. but that's not relevant here. - assert_eq!( - supports, - vec![ - // supports from 30, 40, both will be removed. - vec![ - (30, Support { total: 30, voters: vec![(30, 30)] }), - (40, Support { total: 40, voters: vec![(40, 40)] }) - ], - // supports from 5, 6, 7. 5 and 6 will be removed. - vec![ - (30, Support { total: 11, voters: vec![(7, 7), (5, 2), (6, 2)] }), - (40, Support { total: 7, voters: vec![(5, 3), (6, 4)] }) - ], - // all will stay - vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })] - ] - .try_from_unbounded_paged() - .unwrap() - ); - }); - - // now we get to the real test... - ExtBuilder::unsigned() - .miner_weight(Weight::from_parts(4, u64::MAX)) - .build_and_execute(|| { - // first, replace the stake of all voters with their account id. - let mut current_voters = Voters::get(); - current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who); - Voters::set(current_voters); - - // with 1 weight unit per voter, this can only support 4 voters, despite having 12 - // in the snapshot. - roll_to_snapshot_created(); - ensure_voters(3, 12); - - let solution = mine_full_solution().unwrap(); - assert_eq!( - solution.solution_pages.iter().map(|page| page.voter_count()).sum::<usize>(), - 4 - ); - - load_mock_signed_and_start(solution); - let supports = roll_to_full_verification(); - - // a solution is queued. 
- assert!(VerifierPallet::queued_score().is_some()); - - assert_eq!( - supports, - vec![ - vec![], - vec![(30, Support { total: 7, voters: vec![(7, 7)] })], - vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })] - ] - .try_from_unbounded_paged() - .unwrap() - ); - }) - } - - #[test] - fn trim_weight_partial_solution() { - // This is just demonstration to show the normal election result with new votes, without any - // trimming. - ExtBuilder::unsigned().build_and_execute(|| { - let mut current_voters = Voters::get(); - current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who); - Voters::set(current_voters); - - roll_to_snapshot_created(); - ensure_voters(3, 12); - - let solution = mine_solution(2).unwrap(); - - // 3 of these will be trimmed. - assert_eq!( - solution.solution_pages.iter().map(|page| page.voter_count()).sum::<usize>(), - 7 - ); - - load_mock_signed_and_start(solution); - let supports = roll_to_full_verification(); - - // a solution is queued. - assert!(VerifierPallet::queued_score().is_some()); - - assert_eq!( - supports, - vec![ - vec![], - // 5, 6, 7 will be removed in the next test block - vec![ - (10, Support { total: 10, voters: vec![(8, 8), (5, 2)] }), - (30, Support { total: 16, voters: vec![(6, 6), (7, 7), (5, 3)] }) - ], - vec![ - (10, Support { total: 5, voters: vec![(1, 1), (4, 4)] }), - (30, Support { total: 2, voters: vec![(2, 2)] }) - ] - ] - .try_from_unbounded_paged() - .unwrap() - ); - }); - - // now we get to the real test... - ExtBuilder::unsigned() - .miner_weight(Weight::from_parts(4, u64::MAX)) - .build_and_execute(|| { - // first, replace the stake of all voters with their account id. - let mut current_voters = Voters::get(); - current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who); - Voters::set(current_voters); - - roll_to_snapshot_created(); - ensure_voters(3, 12); - - let solution = mine_solution(2).unwrap(); - assert_eq!( - solution.solution_pages.iter().map(|page| page.voter_count()).sum::<usize>(), - 4 - ); - - load_mock_signed_and_start(solution); - let supports = roll_to_full_verification(); - - // a solution is queued. - assert!(VerifierPallet::queued_score().is_some()); - - assert_eq!( - supports, - vec![ - vec![], - vec![(10, Support { total: 8, voters: vec![(8, 8)] })], - vec![ - (10, Support { total: 5, voters: vec![(1, 1), (4, 4)] }), - (30, Support { total: 2, voters: vec![(2, 2)] }) - ] - ] - .try_from_unbounded_paged() - .unwrap() - ); - }) - } - - #[test] - fn trim_weight_too_much_makes_solution_invalid() { - // with just 1 units, we can support 1 voter. This is not enough to have 2 winner which we - // want. 
- ExtBuilder::unsigned() - .miner_weight(Weight::from_parts(1, u64::MAX)) - .build_and_execute(|| { - let mut current_voters = Voters::get(); - current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who); - Voters::set(current_voters); - - roll_to_snapshot_created(); - ensure_voters(3, 12); - - let solution = mine_full_solution().unwrap(); - assert_eq!( - solution.solution_pages.iter().map(|page| page.voter_count()).sum::<usize>(), - 1 - ); - - load_mock_signed_and_start(solution); - let supports = roll_to_full_verification(); - - // nothing is queued - assert!(VerifierPallet::queued_score().is_none()); - assert_eq!( - supports, - vec![vec![], vec![], vec![]].try_from_unbounded_paged().unwrap() - ); - }) - } - #[test] fn trim_length() { // This is just demonstration to show the normal election result with new votes, without any @@ -1159,6 +1053,8 @@ mod trim_weight_length { #[cfg(test)] mod base_miner { + use std::vec; + use super::*; use crate::{mock::*, Snapshot}; use frame_election_provider_support::TryFromUnboundedPagedSupports; @@ -1217,7 +1113,8 @@ mod base_miner { assert_eq!(paged.solution_pages.len(), 1); // this solution must be feasible and submittable. - BaseMiner::<Runtime>::check_solution(&paged, None, true, "mined").unwrap(); + OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true, "mined") + .unwrap(); // now do a realistic full verification load_mock_signed_and_start(paged.clone()); @@ -1303,7 +1200,8 @@ mod base_miner { ); // this solution must be feasible and submittable. - BaseMiner::<Runtime>::check_solution(&paged, None, false, "mined").unwrap(); + OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, false, "mined") + .unwrap(); // it must also be verified in the verifier load_mock_signed_and_start(paged.clone()); @@ -1390,7 +1288,8 @@ mod base_miner { ); // this solution must be feasible and submittable. - BaseMiner::<Runtime>::check_solution(&paged, None, true, "mined").unwrap(); + OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true, "mined") + .unwrap(); // now do a realistic full verification load_mock_signed_and_start(paged.clone()); let supports = roll_to_full_verification(); @@ -1469,7 +1368,8 @@ mod base_miner { ); // this solution must be feasible and submittable. - BaseMiner::<Runtime>::check_solution(&paged, None, true, "mined").unwrap(); + OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true, "mined") + .unwrap(); // now do a realistic full verification. load_mock_signed_and_start(paged.clone()); let supports = roll_to_full_verification(); @@ -1533,7 +1433,8 @@ mod base_miner { let paged = mine_solution(2).unwrap(); // this solution must be feasible and submittable. - BaseMiner::<Runtime>::check_solution(&paged, None, true, "mined").unwrap(); + OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true, "mined") + .unwrap(); assert_eq!( paged.solution_pages, @@ -1563,7 +1464,8 @@ mod base_miner { ); // this solution must be feasible and submittable. - BaseMiner::<Runtime>::check_solution(&paged, None, true, "mined").unwrap(); + OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true, "mined") + .unwrap(); // now do a realistic full verification. 
load_mock_signed_and_start(paged.clone()); let supports = roll_to_full_verification(); @@ -1599,12 +1501,12 @@ mod base_miner { fn can_reduce_solution() { ExtBuilder::unsigned().build_and_execute(|| { roll_to_snapshot_created(); - let full_edges = BaseMiner::<Runtime>::mine_solution(Pages::get(), false) + let full_edges = OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), false) .unwrap() .solution_pages .iter() .fold(0, |acc, x| acc + x.edge_count()); - let reduced_edges = BaseMiner::<Runtime>::mine_solution(Pages::get(), true) + let reduced_edges = OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), true) .unwrap() .solution_pages .iter() @@ -1615,7 +1517,7 @@ mod base_miner { } #[test] - fn trim_backings_works() { + fn trim_backers_per_page_works() { ExtBuilder::unsigned() .max_backers_per_winner(5) .voter_per_page(8) @@ -1654,7 +1556,7 @@ mod base_miner { 40, Support { total: 40, - voters: vec![(2, 10), (3, 10), (4, 10), (6, 10)] + voters: vec![(2, 10), (3, 10), (5, 10), (6, 10)] } ) ] @@ -1664,11 +1566,112 @@ mod base_miner { ); }) } + + #[test] + fn trim_backers_final_works() { + ExtBuilder::unsigned() + .max_backers_per_winner_final(3) + .pages(3) + .build_and_execute(|| { + roll_to_snapshot_created(); + + let paged = mine_full_solution().unwrap(); + load_mock_signed_and_start(paged.clone()); + + // this must be correct + let supports = roll_to_full_verification(); + + assert_eq!( + verifier_events(), + vec![ + verifier::Event::Verified(2, 2), + verifier::Event::Verified(1, 2), + verifier::Event::Verified(0, 2), + verifier::Event::VerificationFailed( + 0, + verifier::FeasibilityError::FailedToBoundSupport + ) + ] + ); + todo!("miner should trim max backers final"); + + assert_eq!( + supports, + vec![ + // 1 backing for 10 + vec![(10, Support { total: 8, voters: vec![(104, 8)] })], + // 2 backings for 10 + vec![ + (10, Support { total: 17, voters: vec![(10, 10), (103, 7)] }), + (40, Support { total: 40, voters: vec![(40, 40)] }) + ], + // 20 backings for 10 + vec![ + (10, Support { total: 20, voters: vec![(1, 10), (8, 10)] }), + ( + 40, + Support { + total: 40, + voters: vec![(2, 10), (3, 10), (4, 10), (6, 10)] + } + ) + ] + ] + .try_from_unbounded_paged() + .unwrap() + ); + }); + } + + #[test] + fn trim_backers_final_works_2() { + ExtBuilder::unsigned() + .max_backers_per_winner_final(4) + .max_backers_per_winner(2) + .pages(3) + .build_and_execute(|| { + roll_to_snapshot_created(); + + let paged = mine_full_solution().unwrap(); + load_mock_signed_and_start(paged.clone()); + + // this must be correct + let supports = roll_to_full_verification(); + + // 10 has no more than 5 backings, and from the new voters that we added in this + // test, the most staked ones stayed (103, 104) and the rest trimmed. 
+ assert_eq!( + supports, + vec![ + // 1 backing for 10 + vec![(10, Support { total: 8, voters: vec![(104, 8)] })], + // 2 backings for 10 + vec![ + (10, Support { total: 17, voters: vec![(10, 10), (103, 7)] }), + (40, Support { total: 40, voters: vec![(40, 40)] }) + ], + // 20 backings for 10 + vec![ + (10, Support { total: 20, voters: vec![(1, 10), (8, 10)] }), + ( + 40, + Support { + total: 40, + voters: vec![(2, 10), (3, 10), (4, 10), (6, 10)] + } + ) + ] + ] + .try_from_unbounded_paged() + .unwrap() + ); + }); + } } #[cfg(test)] mod offchain_worker_miner { - use crate::verifier::Verifier; + use crate::{verifier::Verifier, CommonError}; use frame_support::traits::Hooks; use sp_runtime::offchain::storage_lock::{BlockAndTime, StorageLock}; @@ -1934,8 +1937,9 @@ mod offchain_worker_miner { vec![vec![(40, Support { total: 10, voters: vec![(3, 10)] })]], 0, ); - let weak_call = - crate::unsigned::Call::submit_unsigned { paged_solution: Box::new(weak_solution) }; + let weak_call = crate::unsigned::Call::<T>::submit_unsigned { + paged_solution: Box::new(weak_solution), + }; call_cache.set(&weak_call); // run again @@ -2012,9 +2016,7 @@ mod offchain_worker_miner { // beautiful errors, isn't it? assert_eq!( OffchainWorkerMiner::<Runtime>::mine_checked_call().unwrap_err(), - OffchainMinerError::BaseMiner(MinerError::SnapshotIndependentChecks( - crate::Error::<Runtime>::WrongWinnerCount - )) + OffchainMinerError::Common(CommonError::WrongWinnerCount) ); }); } diff --git a/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs b/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs index 471ee54f942..87d9754b74e 100644 --- a/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs +++ b/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs @@ -71,21 +71,18 @@ mod pallet { types::*, unsigned::miner::{self}, verifier::Verifier, + CommonError, }; use frame_support::pallet_prelude::*; use frame_system::{offchain::CreateInherent, pallet_prelude::*}; use sp_runtime::traits::SaturatedConversion; use sp_std::prelude::*; - /// convert a DispatchError to a custom InvalidTransaction with the inner code being the error - /// number. - fn dispatch_error_to_invalid(error: sp_runtime::DispatchError) -> InvalidTransaction { - use sp_runtime::ModuleError; - let error_number = match error { - DispatchError::Module(ModuleError { error, .. }) => error, - _ => [0u8, 0, 0, 0], - }; - InvalidTransaction::Custom(error_number[0] as u8) + /// convert a [`crate::CommonError`] to a custom InvalidTransaction with the inner code being + /// the index of the variant. + fn base_error_to_invalid(error: CommonError) -> InvalidTransaction { + let index = error.encode().pop().unwrap_or(0); + InvalidTransaction::Custom(index) } pub trait WeightInfo { @@ -114,16 +111,6 @@ mod pallet { /// The priority of the unsigned transaction submitted in the unsigned-phase type MinerTxPriority: Get<TransactionPriority>; - /// Maximum weight that the miner should consume. - /// - /// The miner will ensure that the total weight of the unsigned solution will not exceed - /// this value, based on [`WeightInfo::submit_unsigned`]. - type MinerMaxWeight: Get<Weight>; - /// Maximum length (bytes) that the mined solution should consume. - /// - /// The miner will ensure that the total length of the unsigned solution will not exceed - /// this value. 
- type MinerMaxLength: Get<u32>; type WeightInfo: WeightInfo; } @@ -145,9 +132,10 @@ mod pallet { #[pallet::call_index(0)] pub fn submit_unsigned( origin: OriginFor<T>, - paged_solution: Box<PagedRawSolution<T>>, + paged_solution: Box<PagedRawSolution<T::MinerConfig>>, ) -> DispatchResultWithPostInfo { ensure_none(origin)?; + // TODO: remove the panic from this function for now. let error_message = "Invalid unsigned submission must produce invalid block and \ deprive validator from their authoring reward."; @@ -199,7 +187,7 @@ mod pallet { ); err }) - .map_err(dispatch_error_to_invalid)?; + .map_err(base_error_to_invalid)?; ValidTransaction::with_tag_prefix("OffchainElection") // The higher the score.minimal_stake, the better a paged_solution is. @@ -223,7 +211,7 @@ mod pallet { fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { if let Call::submit_unsigned { paged_solution, .. } = call { Self::validate_unsigned_checks(paged_solution.as_ref()) - .map_err(dispatch_error_to_invalid) + .map_err(base_error_to_invalid) .map_err(Into::into) } else { Err(InvalidTransaction::Call.into()) @@ -233,6 +221,11 @@ mod pallet { #[pallet::hooks] impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> { + fn integrity_test() { + // TODO: weight of a single page verification should be well below what we desire to + // have. + } + fn offchain_worker(now: BlockNumberFor<T>) { use sp_runtime::offchain::storage_lock::{BlockAndTime, StorageLock}; @@ -311,8 +304,8 @@ mod pallet { /// These check both for snapshot independent checks, and some checks that are specific to /// the unsigned phase. pub(crate) fn validate_unsigned_checks( - paged_solution: &PagedRawSolution<T>, - ) -> DispatchResult { + paged_solution: &PagedRawSolution<T::MinerConfig>, + ) -> Result<(), CommonError> { Self::unsigned_specific_checks(paged_solution) .and(crate::Pallet::<T>::snapshot_independent_checks(paged_solution, None)) .map_err(Into::into) @@ -322,20 +315,20 @@ mod pallet { /// /// ensure solution has the correct phase, and it has only 1 page. pub fn unsigned_specific_checks( - paged_solution: &PagedRawSolution<T>, - ) -> Result<(), crate::Error<T>> { + paged_solution: &PagedRawSolution<T::MinerConfig>, + ) -> Result<(), CommonError> { ensure!( crate::Pallet::<T>::current_phase().is_unsigned(), - crate::Error::<T>::EarlySubmission + CommonError::EarlySubmission ); - - ensure!(paged_solution.solution_pages.len() == 1, crate::Error::<T>::WrongPageCount); + ensure!(paged_solution.solution_pages.len() == 1, CommonError::WrongPageCount); Ok(()) } #[cfg(test)] pub(crate) fn sanity_check() -> Result<(), &'static str> { + // TODO Ok(()) } } diff --git a/substrate/frame/election-provider-multi-block/src/verifier/impls.rs b/substrate/frame/election-provider-multi-block/src/verifier/impls.rs index ccb923acf16..f1f40c3100f 100644 --- a/substrate/frame/election-provider-multi-block/src/verifier/impls.rs +++ b/substrate/frame/election-provider-multi-block/src/verifier/impls.rs @@ -16,13 +16,21 @@ // limitations under the License. 
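// Editorial aside, not part of the patch: a minimal sketch of why `base_error_to_invalid`
// in `unsigned/mod.rs` above can recover a variant index with `encode().pop()`. A
// field-less enum derives a SCALE encoding that is a single byte holding the variant
// index, so the last byte of the encoding is that index. `DemoError` is a hypothetical
// stand-in for `CommonError`; the real indices depend on `CommonError`'s declaration order.
use codec::Encode;

#[derive(Encode)]
enum DemoError {
    EarlySubmission, // encodes as [0]
    WrongPageCount,  // encodes as [1]
}

fn variant_index(e: &DemoError) -> u8 {
    // Same trick as `base_error_to_invalid`: take the last encoded byte, defaulting to 0.
    e.encode().pop().unwrap_or(0)
}
// `variant_index(&DemoError::WrongPageCount)` yields 1, which the pallet would then wrap
// as `InvalidTransaction::Custom(1)`.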
use super::*; -use crate::{helpers, types::SupportsOf, verifier::Verifier, SolutionOf}; +use crate::{ + helpers, + types::VoterOf, + unsigned::miner::{MinerConfig, SupportsOfMiner}, + verifier::Verifier, + SolutionOf, +}; use codec::{Decode, Encode, MaxEncodedLen}; -use frame_election_provider_support::{ExtendedBalance, NposSolution, PageIndex}; +use frame_election_provider_support::{ + ExtendedBalance, NposSolution, PageIndex, TryFromOtherBounds, +}; use frame_support::{ ensure, pallet_prelude::{ValueQuery, *}, - traits::{Defensive, Get}, + traits::{defensive_prelude::*, Defensive, Get}, }; use frame_system::pallet_prelude::*; use pallet::*; @@ -30,6 +38,12 @@ use sp_npos_elections::{evaluate_support, ElectionScore, EvaluateSupport}; use sp_runtime::{Perbill, RuntimeDebug}; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; +pub(crate) type SupportsOfVerifier<V> = frame_election_provider_support::BoundedSupports< + <V as Verifier>::AccountId, + <V as Verifier>::MaxWinnersPerPage, + <V as Verifier>::MaxBackersPerWinner, +>; + /// The status of this pallet. #[derive(Encode, Decode, scale_info::TypeInfo, Clone, Copy, MaxEncodedLen, RuntimeDebug)] #[cfg_attr(any(test, debug_assertions), derive(PartialEq, Eq))] @@ -119,7 +133,9 @@ pub(crate) mod pallet { /// Something that can provide the solution data to the verifier. /// /// In reality, this will be fulfilled by the signed phase. - type SolutionDataProvider: crate::verifier::SolutionDataProvider<Solution = Self::Solution>; + type SolutionDataProvider: crate::verifier::SolutionDataProvider< + Solution = SolutionOf<Self::MinerConfig>, + >; /// The weight information of this pallet. type WeightInfo; @@ -247,14 +263,14 @@ pub(crate) mod pallet { /// known to be valid. At this stage, we write to the invalid variant. Once all pages are /// verified, a call to [`finalize_correct`] will seal the correct pages and flip the /// invalid/valid variants. - pub(crate) fn set_invalid_page(page: PageIndex, supports: SupportsOf<Pallet<T>>) { + pub(crate) fn set_invalid_page(page: PageIndex, supports: SupportsOfVerifier<Pallet<T>>) { use frame_support::traits::TryCollect; Self::mutate_checked(|| { let backings: BoundedVec<_, _> = supports .iter() .map(|(x, s)| (x.clone(), PartialBackings { total: s.total, backers: s.voters.len() as u32 } )) .try_collect() - .expect("`SupportsOf` is bounded by <Pallet<T> as Verifier>::MaxWinnersPerPage, which is assured to be the same as `T::MaxWinnersPerPage` in an integrity test"); + .expect("`SupportsOfVerifier` is bounded by <Pallet<T> as Verifier>::MaxWinnersPerPage, which is assured to be the same as `T::MaxWinnersPerPage` in an integrity test"); QueuedSolutionBackings::<T>::insert(page, backings); match Self::invalid() { @@ -272,7 +288,7 @@ pub(crate) mod pallet { /// solution. pub(crate) fn force_set_single_page_valid( page: PageIndex, - supports: SupportsOf<Pallet<T>>, + supports: SupportsOfVerifier<Pallet<T>>, score: ElectionScore, ) { Self::mutate_checked(|| { @@ -351,7 +367,9 @@ pub(crate) mod pallet { } /// Get a page of the current queued (aka valid) solution. 
- pub(crate) fn get_queued_solution_page(page: PageIndex) -> Option<SupportsOf<Pallet<T>>> { + pub(crate) fn get_queued_solution_page( + page: PageIndex, + ) -> Option<SupportsOfVerifier<Pallet<T>>> { match Self::valid() { ValidSolution::X => QueuedSolutionX::<T>::get(page), ValidSolution::Y => QueuedSolutionY::<T>::get(page), @@ -369,21 +387,23 @@ pub(crate) mod pallet { #[cfg(any(test, debug_assertions))] impl<T: Config> QueuedSolution<T> { - pub(crate) fn valid_iter() -> impl Iterator<Item = (PageIndex, SupportsOf<Pallet<T>>)> { + pub(crate) fn valid_iter( + ) -> impl Iterator<Item = (PageIndex, SupportsOfVerifier<Pallet<T>>)> { match Self::valid() { ValidSolution::X => QueuedSolutionX::<T>::iter(), ValidSolution::Y => QueuedSolutionY::<T>::iter(), } } - pub(crate) fn invalid_iter() -> impl Iterator<Item = (PageIndex, SupportsOf<Pallet<T>>)> { + pub(crate) fn invalid_iter( + ) -> impl Iterator<Item = (PageIndex, SupportsOfVerifier<Pallet<T>>)> { match Self::invalid() { ValidSolution::X => QueuedSolutionX::<T>::iter(), ValidSolution::Y => QueuedSolutionY::<T>::iter(), } } - pub(crate) fn get_valid_page(page: PageIndex) -> Option<SupportsOf<Pallet<T>>> { + pub(crate) fn get_valid_page(page: PageIndex) -> Option<SupportsOfVerifier<Pallet<T>>> { match Self::valid() { ValidSolution::X => QueuedSolutionX::<T>::get(page), ValidSolution::Y => QueuedSolutionY::<T>::get(page), @@ -451,10 +471,12 @@ pub(crate) mod pallet { /// Writing them to a bugger and copying at the ned is slightly better, but expensive. This flag /// system is best of both worlds. #[pallet::storage] - type QueuedSolutionX<T: Config> = StorageMap<_, Twox64Concat, PageIndex, SupportsOf<Pallet<T>>>; + type QueuedSolutionX<T: Config> = + StorageMap<_, Twox64Concat, PageIndex, SupportsOfVerifier<Pallet<T>>>; #[pallet::storage] /// The `Y` variant of the current queued solution. Might be the valid one or not. - type QueuedSolutionY<T: Config> = StorageMap<_, Twox64Concat, PageIndex, SupportsOf<Pallet<T>>>; + type QueuedSolutionY<T: Config> = + StorageMap<_, Twox64Concat, PageIndex, SupportsOfVerifier<Pallet<T>>>; /// Pointer to the variant of [`QueuedSolutionX`] or [`QueuedSolutionY`] that is currently /// valid. #[pallet::storage] @@ -600,10 +622,10 @@ impl<T: Config> Pallet<T> { } fn do_verify_synchronous( - partial_solution: T::Solution, + partial_solution: SolutionOf<T::MinerConfig>, claimed_score: ElectionScore, page: PageIndex, - ) -> Result<SupportsOf<Self>, FeasibilityError> { + ) -> Result<SupportsOfVerifier<Self>, FeasibilityError> { // first, ensure this score will be good enough, even if valid.. let _ = Self::ensure_score_quality(claimed_score)?; @@ -691,75 +713,27 @@ impl<T: Config> Pallet<T> { /// - checks the number of winners to be less than or equal to `DesiredTargets` IN THIS PAGE /// ONLY. pub(super) fn feasibility_check_page_inner( - partial_solution: SolutionOf<T>, + partial_solution: SolutionOf<T::MinerConfig>, page: PageIndex, - ) -> Result<SupportsOf<Self>, FeasibilityError> { + ) -> Result<SupportsOfVerifier<Self>, FeasibilityError> { // Read the corresponding snapshots. let snapshot_targets = crate::Snapshot::<T>::targets().ok_or(FeasibilityError::SnapshotUnavailable)?; let snapshot_voters = crate::Snapshot::<T>::voters(page).ok_or(FeasibilityError::SnapshotUnavailable)?; - - // ----- Start building. First, we need some closures. 
- let cache = helpers::generate_voter_cache::<T, _>(&snapshot_voters); - let voter_at = helpers::voter_at_fn::<T>(&snapshot_voters); - let target_at = helpers::target_at_fn::<T>(&snapshot_targets); - let voter_index = helpers::voter_index_fn_usize::<T>(&cache); - - // Then convert solution -> assignment. This will fail if any of the indices are - // gibberish. - let assignments = partial_solution - .into_assignment(voter_at, target_at) - .map_err::<FeasibilityError, _>(Into::into)?; - - // Ensure that assignments are all correct. - let _ = assignments - .iter() - .map(|ref assignment| { - // Check that assignment.who is actually a voter (defensive-only). NOTE: while - // using the index map from `voter_index` is better than a blind linear search, - // this *still* has room for optimization. Note that we had the index when we - // did `solution -> assignment` and we lost it. Ideal is to keep the index - // around. - - // Defensive-only: must exist in the snapshot. - let snapshot_index = - voter_index(&assignment.who).ok_or(FeasibilityError::InvalidVoter)?; - // Defensive-only: index comes from the snapshot, must exist. - let (_voter, _stake, targets) = - snapshot_voters.get(snapshot_index).ok_or(FeasibilityError::InvalidVoter)?; - debug_assert!(*_voter == assignment.who); - - // Check that all of the targets are valid based on the snapshot. - if assignment.distribution.iter().any(|(t, _)| !targets.contains(t)) { - return Err(FeasibilityError::InvalidVote) - } - Ok(()) - }) - .collect::<Result<(), FeasibilityError>>()?; - - // ----- Start building support. First, we need one more closure. - let stake_of = helpers::stake_of_fn::<T, _>(&snapshot_voters, &cache); - - // This might fail if the normalization fails. Very unlikely. See `integrity_test`. - let staked_assignments = - sp_npos_elections::assignment_ratio_to_staked_normalized(assignments, stake_of) - .map_err::<FeasibilityError, _>(Into::into)?; - - let supports = sp_npos_elections::to_supports(&staked_assignments); - - // Ensure some heuristics. These conditions must hold in the **entire** support, this is - // just a single page. But, they must hold in a single page as well. let desired_targets = crate::Snapshot::<T>::desired_targets().ok_or(FeasibilityError::SnapshotUnavailable)?; - ensure!((supports.len() as u32) <= desired_targets, FeasibilityError::WrongWinnerCount); - - // almost-defensive-only: `MaxBackersPerWinner` is already checked. A sane value of - // `MaxWinnersPerPage` should be more than any possible value of `desired_targets()`, which - // is ALSO checked, so this conversion can almost never fail. - let bounded_supports = - supports.try_into().map_err(|_| FeasibilityError::FailedToBoundSupport)?; - Ok(bounded_supports) + + feasibility_check_page_inner_with_snapshot::<T::MinerConfig>( + partial_solution, + &snapshot_voters, + &snapshot_targets, + desired_targets, + ) + .and_then(|miner_supports| { + SupportsOfVerifier::<Self>::try_from_other_bounds(miner_supports) + .defensive_map_err(|_| FeasibilityError::FailedToBoundSupport) + }) } #[cfg(debug_assertions)] @@ -768,11 +742,82 @@ impl<T: Config> Pallet<T> { } } +/// Same as `feasibility_check_page_inner`, but with a snapshot. +/// +/// This is exported as a standalone function, relying on `MinerConfig` rather than `Config` so that +/// it can be used in any offchain miner. 
+pub fn feasibility_check_page_inner_with_snapshot<T: MinerConfig>( + partial_solution: SolutionOf<T>, + snapshot_voters: &BoundedVec<VoterOf<T>, T::VoterSnapshotPerBlock>, + snapshot_targets: &BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>, + desired_targets: u32, +) -> Result<SupportsOfMiner<T>, FeasibilityError> { + // ----- Start building. First, we need some closures. + let cache = helpers::generate_voter_cache::<T, _>(snapshot_voters); + let voter_at = helpers::voter_at_fn::<T>(snapshot_voters); + let target_at = helpers::target_at_fn::<T>(snapshot_targets); + let voter_index = helpers::voter_index_fn_usize::<T>(&cache); + + // Then convert solution -> assignment. This will fail if any of the indices are + // gibberish. + let assignments = partial_solution + .into_assignment(voter_at, target_at) + .map_err::<FeasibilityError, _>(Into::into)?; + + // Ensure that assignments are all correct. + let _ = assignments + .iter() + .map(|ref assignment| { + // Check that assignment.who is actually a voter (defensive-only). NOTE: while + // using the index map from `voter_index` is better than a blind linear search, + // this *still* has room for optimization. Note that we had the index when we + // did `solution -> assignment` and we lost it. Ideal is to keep the index + // around. + + // Defensive-only: must exist in the snapshot. + let snapshot_index = + voter_index(&assignment.who).ok_or(FeasibilityError::InvalidVoter)?; + // Defensive-only: index comes from the snapshot, must exist. + let (_voter, _stake, targets) = + snapshot_voters.get(snapshot_index).ok_or(FeasibilityError::InvalidVoter)?; + debug_assert!(*_voter == assignment.who); + + // Check that all of the targets are valid based on the snapshot. + if assignment.distribution.iter().any(|(t, _)| !targets.contains(t)) { + return Err(FeasibilityError::InvalidVote) + } + Ok(()) + }) + .collect::<Result<(), FeasibilityError>>()?; + + // ----- Start building support. First, we need one more closure. + let stake_of = helpers::stake_of_fn::<T, _>(&snapshot_voters, &cache); + + // This might fail if the normalization fails. Very unlikely. See `integrity_test`. + let staked_assignments = + sp_npos_elections::assignment_ratio_to_staked_normalized(assignments, stake_of) + .map_err::<FeasibilityError, _>(Into::into)?; + + let supports = sp_npos_elections::to_supports(&staked_assignments); + + // Ensure some heuristics. These conditions must hold in the **entire** support, this is + // just a single page. But, they must hold in a single page as well. + ensure!((supports.len() as u32) <= desired_targets, FeasibilityError::WrongWinnerCount); + + // almost-defensive-only: `MaxBackersPerWinner` is already checked. A sane value of + // `MaxWinnersPerPage` should be more than any possible value of `desired_targets()`, which + // is ALSO checked, so this conversion can almost never fail. 
+ let bounded_supports = + supports.try_into().map_err(|_| FeasibilityError::FailedToBoundSupport)?; + Ok(bounded_supports) +} + impl<T: Config> Verifier for Pallet<T> { type AccountId = T::AccountId; - type Solution = SolutionOf<T>; + type Solution = SolutionOf<T::MinerConfig>; type MaxBackersPerWinner = T::MaxBackersPerWinner; type MaxWinnersPerPage = T::MaxWinnersPerPage; + type MaxBackersPerWinnerFinal = T::MaxBackersPerWinnerFinal; fn set_minimum_score(score: ElectionScore) { MinimumScore::<T>::put(score); @@ -791,7 +836,7 @@ impl<T: Config> Verifier for Pallet<T> { <StatusStorage<T>>::put(Status::Nothing); } - fn get_queued_solution_page(page: PageIndex) -> Option<SupportsOf<Self>> { + fn get_queued_solution_page(page: PageIndex) -> Option<SupportsOfVerifier<Self>> { QueuedSolution::<T>::get_queued_solution_page(page) } @@ -799,7 +844,7 @@ impl<T: Config> Verifier for Pallet<T> { partial_solution: Self::Solution, claimed_score: ElectionScore, page: PageIndex, - ) -> Result<SupportsOf<Self>, FeasibilityError> { + ) -> Result<SupportsOfVerifier<Self>, FeasibilityError> { let maybe_current_score = Self::queued_score(); match Self::do_verify_synchronous(partial_solution, claimed_score, page) { Ok(supports) => { @@ -831,12 +876,12 @@ impl<T: Config> Verifier for Pallet<T> { fn feasibility_check_page( partial_solution: Self::Solution, page: PageIndex, - ) -> Result<SupportsOf<Self>, FeasibilityError> { + ) -> Result<SupportsOfVerifier<Self>, FeasibilityError> { Self::feasibility_check_page_inner(partial_solution, page) } fn force_set_single_page_valid( - partial_supports: SupportsOf<Self>, + partial_supports: SupportsOfVerifier<Self>, page: PageIndex, score: ElectionScore, ) { diff --git a/substrate/frame/election-provider-multi-block/src/verifier/mod.rs b/substrate/frame/election-provider-multi-block/src/verifier/mod.rs index f7b9a6100c7..512f37f351c 100644 --- a/substrate/frame/election-provider-multi-block/src/verifier/mod.rs +++ b/substrate/frame/election-provider-multi-block/src/verifier/mod.rs @@ -76,9 +76,9 @@ mod impls; mod tests; // internal imports -use crate::SupportsOf; use frame_election_provider_support::PageIndex; -pub use impls::{pallet::*, Status}; +use impls::SupportsOfVerifier; +pub use impls::{feasibility_check_page_inner_with_snapshot, pallet::*, Status}; use sp_core::Get; use sp_npos_elections::ElectionScore; use sp_runtime::RuntimeDebug; @@ -134,17 +134,17 @@ pub trait Verifier { /// The account if type. type AccountId; - /// Maximum number of backers that each winner could have. - /// - /// In multi-block verification, this can only be checked after all pages are known to be valid - /// and are already checked. - type MaxBackersPerWinner: Get<u32>; - /// Maximum number of winners that can be represented in each page. /// /// A reasonable value for this should be the maximum number of winners that the election user /// (e.g. the staking pallet) could ever desire. type MaxWinnersPerPage: Get<u32>; + /// Maximum number of backers, per winner, among all pages of an election. + /// + /// This can only be checked at the very final step of verification. + type MaxBackersPerWinnerFinal: Get<u32>; + /// Maximum number of backers that each winner could have, per page. + type MaxBackersPerWinner: Get<u32>; /// Set the minimum score that is acceptable for any solution. /// @@ -165,7 +165,7 @@ pub trait Verifier { /// /// It is the responsibility of the call site to call this function with all appropriate /// `page` arguments. 
- fn get_queued_solution_page(page: PageIndex) -> Option<SupportsOf<Self>>; + fn get_queued_solution_page(page: PageIndex) -> Option<SupportsOfVerifier<Self>>; /// Perform the feasibility check on the given single-page solution. /// @@ -182,7 +182,7 @@ pub trait Verifier { partial_solution: Self::Solution, claimed_score: ElectionScore, page: PageIndex, - ) -> Result<SupportsOf<Self>, FeasibilityError>; + ) -> Result<SupportsOfVerifier<Self>, FeasibilityError>; /// Just perform a single-page feasibility-check, based on the standards of this pallet, without /// writing anything to anywhere. @@ -191,14 +191,14 @@ pub trait Verifier { fn feasibility_check_page( partial_solution: Self::Solution, page: PageIndex, - ) -> Result<SupportsOf<Self>, FeasibilityError>; + ) -> Result<SupportsOfVerifier<Self>, FeasibilityError>; /// Force set a single page solution as the valid one. /// /// Will erase any previous solution. Should only be used in case of emergency fallbacks and /// similar. fn force_set_single_page_valid( - partial_supports: SupportsOf<Self>, + partial_supports: SupportsOfVerifier<Self>, page: PageIndex, score: ElectionScore, ); diff --git a/substrate/frame/election-provider-support/src/lib.rs b/substrate/frame/election-provider-support/src/lib.rs index 22e83630a6a..99d09f4a9b7 100644 --- a/substrate/frame/election-provider-support/src/lib.rs +++ b/substrate/frame/election-provider-support/src/lib.rs @@ -201,6 +201,7 @@ extern crate alloc; use alloc::{boxed::Box, vec::Vec}; use core::fmt::Debug; +use frame_support::traits::{Defensive, DefensiveResult}; use sp_core::ConstU32; use sp_runtime::{ traits::{Bounded, Saturating, Zero}, @@ -795,13 +796,16 @@ impl<AccountId, Bound: Get<u32>> TryFrom<sp_npos_elections::Support<AccountId>> } impl<AccountId: Clone, Bound: Get<u32>> BoundedSupport<AccountId, Bound> { - pub fn sorted_truncate_from(mut support: sp_npos_elections::Support<AccountId>) -> Self { + pub fn sorted_truncate_from(mut support: sp_npos_elections::Support<AccountId>) -> (Self, u32) { // If bounds meet, then short circuit. if let Ok(bounded) = support.clone().try_into() { - return bounded + return (bounded, 0) } + let pre_len = support.voters.len(); // sort support based on stake of each backer, low to high. + // Note: we don't sort high to low and truncate because we would have to track `total` + // updates, so we need one iteration anyhow. support.voters.sort_by(|a, b| a.1.cmp(&b.1)); // then do the truncation. let mut bounded = Self { voters: Default::default(), total: 0 }; @@ -811,7 +815,8 @@ impl<AccountId: Clone, Bound: Get<u32>> BoundedSupport<AccountId, Bound> { } bounded.total += weight; } - bounded + let post_len = bounded.voters.len(); + (bounded, (pre_len - post_len) as u32) } } @@ -829,20 +834,66 @@ pub struct BoundedSupports<AccountId, BOuter: Get<u32>, BInner: Get<u32>>( pub BoundedVec<(AccountId, BoundedSupport<AccountId, BInner>), BOuter>, ); +/// Try and build yourself from another `BoundedSupports` with a different set of types. 
+pub trait TryFromOtherBounds<AccountId, BOtherOuter: Get<u32>, BOtherInner: Get<u32>> { + fn try_from_other_bounds( + other: BoundedSupports<AccountId, BOtherOuter, BOtherInner>, + ) -> Result<Self, crate::Error> + where + Self: Sized; +} + +impl< + AccountId, + BOuter: Get<u32>, + BInner: Get<u32>, + BOtherOuter: Get<u32>, + BOuterInner: Get<u32>, + > TryFromOtherBounds<AccountId, BOtherOuter, BOuterInner> + for BoundedSupports<AccountId, BOuter, BInner> +{ + fn try_from_other_bounds( + other: BoundedSupports<AccountId, BOtherOuter, BOuterInner>, + ) -> Result<Self, crate::Error> { + // TODO: we might as well do this with unsafe rust and do it faster. + if BOtherOuter::get() <= BOuter::get() && BInner::get() <= BOuterInner::get() { + let supports = other + .into_iter() + .map(|(acc, b_support)| { + b_support + .try_into() + .defensive_map_err(|_| Error::BoundsExceeded) + .map(|b_support| (acc, b_support)) + }) + .collect::<Result<Vec<_>, _>>() + .defensive()?; + supports.try_into() + } else { + Err(crate::Error::BoundsExceeded) + } + } +} + impl<AccountId: Clone, BOuter: Get<u32>, BInner: Get<u32>> BoundedSupports<AccountId, BOuter, BInner> { - pub fn sorted_truncate_from(supports: Supports<AccountId>) -> Self { + /// Two u32s returned are number of winners and backers removed respectively. + pub fn sorted_truncate_from(supports: Supports<AccountId>) -> (Self, u32, u32) { // if bounds, meet, short circuit if let Ok(bounded) = supports.clone().try_into() { - return bounded + return (bounded, 0, 0) } + let pre_winners = supports.len(); + let mut backers_removed = 0; // first, convert all inner supports. let mut inner_supports = supports .into_iter() .map(|(account, support)| { - (account, BoundedSupport::<AccountId, BInner>::sorted_truncate_from(support)) + let (bounded, removed) = + BoundedSupport::<AccountId, BInner>::sorted_truncate_from(support); + backers_removed += removed; + (account, bounded) }) .collect::<Vec<_>>(); @@ -850,11 +901,12 @@ impl<AccountId: Clone, BOuter: Get<u32>, BInner: Get<u32>> inner_supports.sort_by(|a, b| b.1.total.cmp(&a.1.total)); // then take the first slice that can fit. - BoundedSupports( - BoundedVec::<(AccountId, BoundedSupport<AccountId, BInner>), BOuter>::truncate_from( - inner_supports, - ), - ) + let bounded = BoundedSupports(BoundedVec::< + (AccountId, BoundedSupport<AccountId, BInner>), + BOuter, + >::truncate_from(inner_supports)); + let post_winners = bounded.len(); + (bounded, (pre_winners - post_winners) as u32, backers_removed) } } pub trait TryFromUnboundedPagedSupports<AccountId, BOuter: Get<u32>, BInner: Get<u32>> { @@ -912,6 +964,15 @@ impl<AccountId: PartialEq, BOuter: Get<u32>, BInner: Get<u32>> PartialEq } } +impl<AccountId, BOuter: Get<u32>, BInner: Get<u32>> Into<Supports<AccountId>> + for BoundedSupports<AccountId, BOuter, BInner> +{ + fn into(self) -> Supports<AccountId> { + // TODO: can be done faster with unsafe code. 
+ self.0.into_iter().map(|(acc, b_support)| (acc, b_support.into())).collect() + } +} + impl<AccountId, BOuter: Get<u32>, BInner: Get<u32>> From<BoundedVec<(AccountId, BoundedSupport<AccountId, BInner>), BOuter>> for BoundedSupports<AccountId, BOuter, BInner> diff --git a/substrate/frame/election-provider-support/src/onchain.rs b/substrate/frame/election-provider-support/src/onchain.rs index 3fe8f3b4bc3..3478eec6c9d 100644 --- a/substrate/frame/election-provider-support/src/onchain.rs +++ b/substrate/frame/election-provider-support/src/onchain.rs @@ -143,7 +143,9 @@ impl<T: Config> OnChainExecution<T> { let unbounded = to_supports(&staked); let bounded = if T::Sort::get() { - BoundedSupportsOf::<Self>::sorted_truncate_from(unbounded) + let (bounded, _winners_removed, _backers_removed) = + BoundedSupportsOf::<Self>::sorted_truncate_from(unbounded); + bounded } else { unbounded.try_into().map_err(|_| Error::FailedToBound)? }; @@ -282,12 +284,11 @@ mod tests { } mod mock_data_provider { + use super::*; + use crate::{data_provider, DataProviderBounds, PageIndex, VoterOf}; use frame_support::traits::ConstU32; use sp_runtime::bounded_vec; - use super::*; - use crate::{data_provider, PageIndex, VoterOf}; - pub struct DataProvider; impl ElectionDataProvider for DataProvider { type AccountId = AccountId; diff --git a/substrate/frame/election-provider-support/src/tests.rs b/substrate/frame/election-provider-support/src/tests.rs index b2bf223ed2f..de4bac3664b 100644 --- a/substrate/frame/election-provider-support/src/tests.rs +++ b/substrate/frame/election-provider-support/src/tests.rs @@ -461,7 +461,8 @@ fn sorted_truncate_from_works() { (3, Support { total: 406, voters: vec![(100, 100), (101, 101), (102, 102), (103, 103)] }), ]; - let bounded = BoundedSupports::<u32, ConstU32<2>, ConstU32<2>>::sorted_truncate_from(supports); + let (bounded, winners_removed, backers_removed) = + BoundedSupports::<u32, ConstU32<2>, ConstU32<2>>::sorted_truncate_from(supports); // we trim 2 as it has least total support, and trim backers based on stake. assert_eq!( bounded @@ -474,4 +475,6 @@ fn sorted_truncate_from_works() { (1, Support { total: 203, voters: vec![(102, 102), (101, 101)] }) ] ); + assert_eq!(winners_removed, 1); + assert_eq!(backers_removed, 3); } -- GitLab
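As a closing illustration (not part of the patch): a minimal sketch of how the new `TryFromOtherBounds` helper might be used to re-bound supports, in the spirit of how `feasibility_check_page_inner` converts miner-bounded supports into verifier-bounded ones. The account id type, the numeric bounds, and the `rebound` helper name are illustrative assumptions.

use frame_election_provider_support::{BoundedSupports, TryFromOtherBounds};
use sp_core::ConstU32;

// Supports as a hypothetical miner might bound them (up to 32 backers per winner), and
// as a hypothetical verifier stores them (up to 16 backers per winner).
type MinerBounded = BoundedSupports<u64, ConstU32<16>, ConstU32<32>>;
type VerifierBounded = BoundedSupports<u64, ConstU32<16>, ConstU32<16>>;

fn rebound(supports: MinerBounded) -> Option<VerifierBounded> {
    // The conversion fails (mapped to `None` here) when the destination bounds cannot
    // accommodate the data, which the patch surfaces as `Error::BoundsExceeded`.
    VerifierBounded::try_from_other_bounds(supports).ok()
}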