// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.

// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.

//! Substrate GRANDPA Pallet
//!
//! This pallet is an on-chain GRANDPA light client for Substrate based chains.
//!
//! It works by trustlessly verifying GRANDPA finality proofs on-chain. Once verified, finalized
//! headers are stored in the pallet, thereby creating a sparse header chain. This sparse header
//! chain can be used as a source of truth for other higher-level applications.
//!
//! The pallet is responsible for tracking GRANDPA validator set hand-offs. We only import headers
//! with justifications signed by the current validator set we know of. The header is inspected for
//! a `ScheduledChanges` digest item, which is then used to update to the next validator set.
//!
//! Since this pallet only tracks finalized headers it does not deal with forks. Forks can only
//! occur if the GRANDPA validator set on the bridged chain is either colluding or there is a
//! severe bug resulting in an equivocation. Such events are outside the scope of this pallet.
//! Should a fork occur on the bridged chain, governance intervention will be required to
//! re-initialize the bridge and track the right fork.

#![cfg_attr(not(feature = "std"), no_std)]
// Runtime-generated enums
#![allow(clippy::large_enum_variant)]

pub use storage_types::StoredAuthoritySet;

use bp_header_chain::{
	justification::GrandpaJustification, ChainWithGrandpa, HeaderChain, InitializationData,
	StoredHeaderData, StoredHeaderDataBuilder,
};
use bp_runtime::{BlockNumberOf, HashOf, HasherOf, HeaderId, HeaderOf, OwnedBridgeModule};
use finality_grandpa::voter_set::VoterSet;
use frame_support::{dispatch::PostDispatchInfo, ensure};
use sp_consensus_grandpa::{ConsensusLog, GRANDPA_ENGINE_ID};
use sp_runtime::{
	traits::{Header as HeaderT, Zero},
	SaturatedConversion,
};
use sp_std::{boxed::Box, convert::TryInto};

mod call_ext;
#[cfg(test)]
mod mock;
mod storage_types;

/// Module, containing weights for this pallet.
pub mod weights;

#[cfg(feature = "runtime-benchmarks")]
pub mod benchmarking;

// Re-export in crate namespace for `construct_runtime!`
pub use call_ext::*;
pub use pallet::*;
pub use weights::WeightInfo;

/// The target that will be used when publishing logs related to this pallet.
pub const LOG_TARGET: &str = "runtime::bridge-grandpa";

/// Bridged chain from the pallet configuration.
pub type BridgedChain<T, I> = <T as Config<I>>::BridgedChain;
/// Block number of the bridged chain.
pub type BridgedBlockNumber<T, I> = BlockNumberOf<<T as Config<I>>::BridgedChain>;
/// Block hash of the bridged chain.
pub type BridgedBlockHash<T, I> = HashOf<<T as Config<I>>::BridgedChain>;
/// Block id of the bridged chain.
pub type BridgedBlockId<T, I> = HeaderId<BridgedBlockHash<T, I>, BridgedBlockNumber<T, I>>;
/// Hasher of the bridged chain.
pub type BridgedBlockHasher<T, I> = HasherOf<<T as Config<I>>::BridgedChain>;
/// Header of the bridged chain.
pub type BridgedHeader<T, I> = HeaderOf<<T as Config<I>>::BridgedChain>;
/// Header data of the bridged chain that is stored at this chain by this pallet.
pub type BridgedStoredHeaderData<T, I> =
	StoredHeaderData<BridgedBlockNumber<T, I>, BridgedBlockHash<T, I>>;

#[frame_support::pallet]
pub mod pallet {
	use super::*;
	use bp_runtime::BasicOperatingMode;
	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;

	#[pallet::config]
	pub trait Config<I: 'static = ()>: frame_system::Config {
		/// The chain we are bridging to here.
		type BridgedChain: ChainWithGrandpa;

		/// The upper bound on the number of requests allowed by the pallet.
		///
		/// A request refers to an action which writes a header to storage.
		///
		/// Once this bound is reached the pallet will not allow any dispatchables to be called
		/// until the request count has decreased.
		#[pallet::constant]
		type MaxRequests: Get<u32>;

		/// Maximal number of finalized headers to keep in the storage.
		///
		/// The setting is there to prevent growing the on-chain state indefinitely. Note that
		/// the setting does not relate to block numbers - we will simply keep that many of the
		/// most recent items in storage, so it doesn't guarantee any fixed timeframe for which
		/// finalized headers are available.
		///
		/// Incautious change of this constant may lead to orphan entries in the runtime storage.
		#[pallet::constant]
		type HeadersToKeep: Get<u32>;

		/// Weights gathered through benchmarking.
		type WeightInfo: WeightInfo;
	}

	#[pallet::pallet]
	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);

	#[pallet::hooks]
	impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {
		fn on_initialize(_n: T::BlockNumber) -> frame_support::weights::Weight {
			<RequestCount<T, I>>::mutate(|count| *count = count.saturating_sub(1));

			T::DbWeight::get().reads_writes(1, 1)
		}
	}

	impl<T: Config<I>, I: 'static> OwnedBridgeModule<T> for Pallet<T, I> {
		const LOG_TARGET: &'static str = LOG_TARGET;
		type OwnerStorage = PalletOwner<T, I>;
		type OperatingMode = BasicOperatingMode;
		type OperatingModeStorage = PalletOperatingMode<T, I>;
	}

	#[pallet::call]
	impl<T: Config<I>, I: 'static> Pallet<T, I> {
		/// Verify a target header is finalized according to the given finality proof.
		///
		/// It will use the underlying storage pallet to fetch information about the current
		/// authorities and best finalized header in order to verify that the header is finalized.
		///
		/// If verification succeeds, the target header is written to this pallet's storage.
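		///
		/// A minimal, illustrative sketch of how the call is usually constructed (the
		/// `TestRuntime`, `test_header` and `make_default_justification` helpers come from this
		/// crate's test utilities and `relayer_account` is hypothetical; none of them are part of
		/// the dispatchable itself):
		///
		/// ```ignore
		/// let header = test_header(1);
		/// let justification = make_default_justification(&header);
		/// Pallet::<TestRuntime>::submit_finality_proof(
		/// 	RuntimeOrigin::signed(relayer_account),
		/// 	Box::new(header),
		/// 	justification,
		/// )?;
		/// ```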
		#[pallet::call_index(0)]
		#[pallet::weight(<T::WeightInfo as WeightInfo>::submit_finality_proof(
			justification.commit.precommits.len().saturated_into(),
			justification.votes_ancestries.len().saturated_into(),
		))]
		pub fn submit_finality_proof(
			_origin: OriginFor<T>,
			finality_target: Box<BridgedHeader<T, I>>,
			justification: GrandpaJustification<BridgedHeader<T, I>>,
		) -> DispatchResultWithPostInfo {
			Self::ensure_not_halted().map_err(Error::<T, I>::BridgeModule)?;

			ensure!(Self::request_count() < T::MaxRequests::get(), <Error<T, I>>::TooManyRequests);

			let (hash, number) = (finality_target.hash(), finality_target.number());
			log::trace!(
				target: LOG_TARGET,
				"Going to try and finalize header {:?}",
				finality_target
			);

			SubmitFinalityProofHelper::<T, I>::check_obsolete(*number)?;

			let authority_set = <CurrentAuthoritySet<T, I>>::get();
			let unused_proof_size = authority_set.unused_proof_size();
			let set_id = authority_set.set_id;
			verify_justification::<T, I>(&justification, hash, *number, authority_set.into())?;

			let is_authorities_change_enacted =
				try_enact_authority_change::<T, I>(&finality_target, set_id)?;
			let may_refund_call_fee = is_authorities_change_enacted &&
				submit_finality_proof_info_from_args::<T, I>(&finality_target, &justification)
					.fits_limits();
			<RequestCount<T, I>>::mutate(|count| *count += 1);
			insert_header::<T, I>(*finality_target, hash);
			log::info!(
				target: LOG_TARGET,
				"Successfully imported finalized header with hash {:?}!",
				hash
			);

			// A mandatory header is a header that changes the authority set. The pallet can't make
			// further progress without importing it, so every bridge MUST import mandatory headers.
			//
			// We don't want to charge extra costs for mandatory operations, so the relayer does not
			// pay a fee for transactions that import mandatory headers.
			//
			// If the size/weight of the call exceeds our estimated limits, the relayer still needs
			// to pay for the transaction.
			let pays_fee = if may_refund_call_fee { Pays::No } else { Pays::Yes };

			// the proof size component of the call weight assumes that there are
			// `MaxBridgedAuthorities` in the `CurrentAuthoritySet` (we use `MaxEncodedLen`
			// estimation). But if their number is lower, then we may "refund" some `proof_size`,
			// making the proof smaller and leaving block space for other useful transactions
			let pre_dispatch_weight = T::WeightInfo::submit_finality_proof(
				justification.commit.precommits.len().saturated_into(),
				justification.votes_ancestries.len().saturated_into(),
			);
			let actual_weight = pre_dispatch_weight
				.set_proof_size(pre_dispatch_weight.proof_size().saturating_sub(unused_proof_size));

			Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee })
		}

		/// Bootstrap the bridge pallet with an initial header and authority set from which to sync.
		///
		/// The initial configuration provided does not need to be the genesis header of the bridged
		/// chain, it can be any arbitrary header. You can also provide the next scheduled set
		/// change if it is already known.
		///
		/// This function is only allowed to be called from a trusted origin and writes to storage
		/// with practically no checks in terms of the validity of the data. It is important that
		/// you ensure that valid data is being passed in.
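		///
		/// A minimal initialization sketch (assumes an already known `header` and `authority_list`
		/// of the bridged chain; both are illustrative values, as is `Runtime`):
		///
		/// ```ignore
		/// let init_data = InitializationData {
		/// 	header: Box::new(header),
		/// 	authority_list,
		/// 	set_id: 1,
		/// 	operating_mode: BasicOperatingMode::Normal,
		/// };
		/// Pallet::<Runtime>::initialize(RuntimeOrigin::root(), init_data)?;
		/// ```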
		#[pallet::call_index(1)]
		#[pallet::weight((T::DbWeight::get().reads_writes(2, 5), DispatchClass::Operational))]
		pub fn initialize(
			origin: OriginFor<T>,
			init_data: super::InitializationData<BridgedHeader<T, I>>,
		) -> DispatchResultWithPostInfo {
			Self::ensure_owner_or_root(origin)?;

			let init_allowed = !<BestFinalized<T, I>>::exists();
			ensure!(init_allowed, <Error<T, I>>::AlreadyInitialized);
			initialize_bridge::<T, I>(init_data.clone())?;

			log::info!(
				target: LOG_TARGET,
				"Pallet has been initialized with the following parameters: {:?}",
				init_data
			);

			Ok(().into())
		}

		/// Change `PalletOwner`.
		///
		/// May only be called either by root, or by `PalletOwner`.
		#[pallet::call_index(2)]
		#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
		pub fn set_owner(origin: OriginFor<T>, new_owner: Option<T::AccountId>) -> DispatchResult {
			<Self as OwnedBridgeModule<_>>::set_owner(origin, new_owner)
		}

		/// Halt or resume all pallet operations.
		///
		/// May only be called either by root, or by `PalletOwner`.
		#[pallet::call_index(3)]
		#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
		pub fn set_operating_mode(
			origin: OriginFor<T>,
			operating_mode: BasicOperatingMode,
		) -> DispatchResult {
			<Self as OwnedBridgeModule<_>>::set_operating_mode(origin, operating_mode)
		}
	}

	/// The current number of requests which have been written to storage.
	///
	/// If the `RequestCount` hits `MaxRequests`, no more calls will be allowed to the pallet until
	/// the request capacity is increased.
	///
	/// The `RequestCount` is decreased by one at the beginning of every block. This is to ensure
	/// that the pallet can always make progress.
	#[pallet::storage]
	#[pallet::getter(fn request_count)]
	pub(super) type RequestCount<T: Config<I>, I: 'static = ()> = StorageValue<_, u32, ValueQuery>;

	/// Hash of the header used to bootstrap the pallet.
	#[pallet::storage]
	pub(super) type InitialHash<T: Config<I>, I: 'static = ()> =
		StorageValue<_, BridgedBlockHash<T, I>, ValueQuery>;

	/// Hash of the best finalized header.
	#[pallet::storage]
	#[pallet::getter(fn best_finalized)]
	pub type BestFinalized<T: Config<I>, I: 'static = ()> =
		StorageValue<_, BridgedBlockId<T, I>, OptionQuery>;

	/// A ring buffer of imported hashes. Ordered by the insertion time.
	#[pallet::storage]
	pub(super) type ImportedHashes<T: Config<I>, I: 'static = ()> = StorageMap<
		Hasher = Identity,
		Key = u32,
		Value = BridgedBlockHash<T, I>,
		QueryKind = OptionQuery,
		OnEmpty = GetDefault,
		MaxValues = MaybeHeadersToKeep<T, I>,
	>;

	/// Current ring buffer position.
	#[pallet::storage]
	pub(super) type ImportedHashesPointer<T: Config<I>, I: 'static = ()> =
		StorageValue<_, u32, ValueQuery>;

	/// Relevant fields of imported headers.
	#[pallet::storage]
	pub type ImportedHeaders<T: Config<I>, I: 'static = ()> = StorageMap<
		Hasher = Identity,
		Key = BridgedBlockHash<T, I>,
		Value = BridgedStoredHeaderData<T, I>,
		QueryKind = OptionQuery,
		OnEmpty = GetDefault,
		MaxValues = MaybeHeadersToKeep<T, I>,
	>;

	/// The current GRANDPA Authority set.
	#[pallet::storage]
	pub type CurrentAuthoritySet<T: Config<I>, I: 'static = ()> =
		StorageValue<_, StoredAuthoritySet<T, I>, ValueQuery>;

	/// Optional pallet owner.
	///
	/// Pallet owner has a right to halt all pallet operations and then resume them. If it is
	/// `None`, then there are no direct ways to halt/resume pallet operations, but other
	/// runtime methods may still be used to do that (e.g. a democracy referendum that updates the
	/// operating mode flag directly or calls `set_operating_mode`).
	#[pallet::storage]
	pub type PalletOwner<T: Config<I>, I: 'static = ()> =
		StorageValue<_, T::AccountId, OptionQuery>;

	/// The current operating mode of the pallet.
	///
	/// Depending on the mode either all, or no transactions will be allowed.
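	///
	/// For example, the pallet owner (or root) can halt all operations with a call like the
	/// following sketch (`Runtime` is illustrative):
	///
	/// ```ignore
	/// Pallet::<Runtime>::set_operating_mode(RuntimeOrigin::root(), BasicOperatingMode::Halted)?;
	/// ```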
	#[pallet::storage]
	pub type PalletOperatingMode<T: Config<I>, I: 'static = ()> =
		StorageValue<_, BasicOperatingMode, ValueQuery>;

	#[pallet::genesis_config]
	pub struct GenesisConfig<T: Config<I>, I: 'static = ()> {
		/// Optional module owner account.
		pub owner: Option<T::AccountId>,
		/// Optional module initialization data.
		pub init_data: Option<super::InitializationData<BridgedHeader<T, I>>>,
	}

	#[cfg(feature = "std")]
	impl<T: Config<I>, I: 'static> Default for GenesisConfig<T, I> {
		fn default() -> Self {
			Self { owner: None, init_data: None }
		}
	}

	#[pallet::genesis_build]
	impl<T: Config<I>, I: 'static> GenesisBuild<T, I> for GenesisConfig<T, I> {
		fn build(&self) {
			if let Some(ref owner) = self.owner {
				<PalletOwner<T, I>>::put(owner);
			}

			if let Some(init_data) = self.init_data.clone() {
				initialize_bridge::<T, I>(init_data).expect("genesis config is correct; qed");
			} else {
				// Since the bridge hasn't been initialized we shouldn't allow anyone to perform
				// transactions.
				<PalletOperatingMode<T, I>>::put(BasicOperatingMode::Halted);
			}
		}
	}

	#[pallet::error]
	pub enum Error<T, I = ()> {
		/// The given justification is invalid for the given header.
		InvalidJustification,
		/// The authority set from the underlying header chain is invalid.
		InvalidAuthoritySet,
		/// There are too many requests for the current window to handle.
		TooManyRequests,
		/// The header being imported is older than the best finalized header known to the pallet.
		OldHeader,
		/// The scheduled authority set change found in the header is unsupported by the pallet.
		///
		/// This is the case for non-standard (e.g. forced) authority set changes.
		UnsupportedScheduledChange,
		/// The pallet is not yet initialized.
		NotInitialized,
		/// The pallet has already been initialized.
		AlreadyInitialized,
		/// Too many authorities in the set.
		TooManyAuthoritiesInSet,
		/// Error generated by the `OwnedBridgeModule` trait.
		BridgeModule(bp_runtime::OwnedBridgeModuleError),
	}

	/// Check the given header for a GRANDPA scheduled authority set change. If a change
	/// is found it will be enacted immediately.
	///
	/// This function does not support forced changes, or scheduled changes with delays
	/// since these types of changes are indicative of abnormal behavior from GRANDPA.
	///
	/// Returned value will indicate if a change was enacted or not.
	pub(crate) fn try_enact_authority_change<T: Config<I>, I: 'static>(
		header: &BridgedHeader<T, I>,
		current_set_id: sp_consensus_grandpa::SetId,
	) -> Result<bool, sp_runtime::DispatchError> {
		let mut change_enacted = false;

		// We don't support forced changes - at that point governance intervention is required.
		ensure!(
			super::find_forced_change(header).is_none(),
			<Error<T, I>>::UnsupportedScheduledChange
		);

		if let Some(change) = super::find_scheduled_change(header) {
			// We only support scheduled changes with no delay, so anything else isn't valid here.
			ensure!(change.delay == Zero::zero(), <Error<T, I>>::UnsupportedScheduledChange);

			// TODO [#788]: Stop manually increasing the `set_id` here.
			let next_authorities = StoredAuthoritySet::<T, I> {
				authorities: change
					.next_authorities
					.try_into()
					.map_err(|_| Error::<T, I>::TooManyAuthoritiesInSet)?,
				set_id: current_set_id + 1,
			};

			// Since our header schedules a change and we know the delay is 0, it must also enact
			// the change.
			<CurrentAuthoritySet<T, I>>::put(&next_authorities);
			change_enacted = true;

			log::info!(
				target: LOG_TARGET,
				"Transitioned from authority set {} to {}! New authorities are: {:?}",
				current_set_id,
				current_set_id + 1,
				next_authorities,
			);
		};

		Ok(change_enacted)
	}

	/// Verify a GRANDPA justification (finality proof) for a given header.
	///
	/// Will use the GRANDPA current authorities known to the pallet.
	///
	/// Returns `Ok(())` if the justification is valid, or an error describing why verification
	/// has failed.
	pub(crate) fn verify_justification<T: Config<I>, I: 'static>(
		justification: &GrandpaJustification<BridgedHeader<T, I>>,
		hash: BridgedBlockHash<T, I>,
		number: BridgedBlockNumber<T, I>,
		authority_set: bp_header_chain::AuthoritySet,
	) -> Result<(), sp_runtime::DispatchError> {
		use bp_header_chain::justification::verify_justification;

		let voter_set =
			VoterSet::new(authority_set.authorities).ok_or(<Error<T, I>>::InvalidAuthoritySet)?;
		let set_id = authority_set.set_id;

		Ok(verify_justification::<BridgedHeader<T, I>>(
			(hash, number),
			set_id,
			&voter_set,
			justification,
		)
		.map_err(|e| {
			log::error!(
				target: LOG_TARGET,
				"Received invalid justification for {:?}: {:?}",
				hash,
				e,
			);
			<Error<T, I>>::InvalidJustification
		})?)
	}

	/// Import a previously verified header to the storage.
	///
	/// Note this function solely takes care of updating the storage and pruning old entries,
	/// but does not verify the validity of such import.
	pub(crate) fn insert_header<T: Config<I>, I: 'static>(
		header: BridgedHeader<T, I>,
		hash: BridgedBlockHash<T, I>,
	) {
		let index = <ImportedHashesPointer<T, I>>::get();
		let pruning = <ImportedHashes<T, I>>::try_get(index);
		<BestFinalized<T, I>>::put(HeaderId(*header.number(), hash));
		<ImportedHeaders<T, I>>::insert(hash, header.build());
		<ImportedHashes<T, I>>::insert(index, hash);

		// Update ring buffer pointer and remove old header.
		<ImportedHashesPointer<T, I>>::put((index + 1) % T::HeadersToKeep::get());
		if let Ok(hash) = pruning {
			log::debug!(target: LOG_TARGET, "Pruning old header: {:?}.", hash);
			<ImportedHeaders<T, I>>::remove(hash);
		}
	}

	/// Since this writes to storage with no real checks, this should only be used in functions
	/// that were called by a trusted origin.
	pub(crate) fn initialize_bridge<T: Config<I>, I: 'static>(
		init_params: super::InitializationData<BridgedHeader<T, I>>,
	) -> Result<(), Error<T, I>> {
		let super::InitializationData { header, authority_list, set_id, operating_mode } =
			init_params;
		let authority_set_length = authority_list.len();
		let authority_set = StoredAuthoritySet::<T, I>::try_new(authority_list, set_id)
			.map_err(|e| {
				log::error!(
					target: LOG_TARGET,
					"Failed to initialize bridge. Number of authorities in the set {} is larger than the configured value {}",
					authority_set_length,
					T::BridgedChain::MAX_AUTHORITIES_COUNT,
				);

				e
			})?;
		let initial_hash = header.hash();

		<InitialHash<T, I>>::put(initial_hash);
		<ImportedHashesPointer<T, I>>::put(0);
		insert_header::<T, I>(*header, initial_hash);

		<CurrentAuthoritySet<T, I>>::put(authority_set);

		<PalletOperatingMode<T, I>>::put(operating_mode);

		Ok(())
	}

	/// Adapter for using `Config::HeadersToKeep` as `MaxValues` bound in our storage maps.
	pub struct MaybeHeadersToKeep<T, I>(PhantomData<(T, I)>);

	// this implementation is required to use the struct as `MaxValues`
	impl<T: Config<I>, I: 'static> Get<Option<u32>> for MaybeHeadersToKeep<T, I> {
		fn get() -> Option<u32> {
			Some(T::HeadersToKeep::get())
		}
	}

	/// Initialize the pallet so that it is ready for inserting a new header.
	///
	/// The function makes sure that the new insertion will cause the pruning of some old header.
	///
	/// Returns the parent header for the new header.
	#[cfg(feature = "runtime-benchmarks")]
	pub(crate) fn bootstrap_bridge<T: Config<I>, I: 'static>(
		init_params: super::InitializationData<BridgedHeader<T, I>>,
	) -> BridgedHeader<T, I> {
		let start_header = init_params.header.clone();
		initialize_bridge::<T, I>(init_params).expect("benchmarks are correct");

		// the most obvious way to cause pruning during next insertion would be to insert
		// `HeadersToKeep` headers. But it'll make our benchmarks slow. So we will just play with
		// our pruning ring-buffer.
		assert_eq!(ImportedHashesPointer::<T, I>::get(), 1);
		ImportedHashesPointer::<T, I>::put(0);

		*start_header
	}
}

impl<T: Config<I>, I: 'static> Pallet<T, I> {
	/// Get the best finalized block number.
	pub fn best_finalized_number() -> Option<BridgedBlockNumber<T, I>> {
		BestFinalized::<T, I>::get().map(|id| id.number())
	}
}

/// Bridge GRANDPA pallet as header chain.
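///
/// Downstream code (e.g. a messages pallet) can consume it through the `HeaderChain` trait;
/// a minimal sketch (`Runtime` and `header_hash` are illustrative):
///
/// ```ignore
/// use bp_header_chain::HeaderChain;
///
/// let maybe_state_root =
/// 	GrandpaChainHeaders::<Runtime, ()>::finalized_header_state_root(header_hash);
/// ```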
pub type GrandpaChainHeaders<T, I> = Pallet<T, I>;

impl<T: Config<I>, I: 'static> HeaderChain<BridgedChain<T, I>> for GrandpaChainHeaders<T, I> {
	fn finalized_header_state_root(
		header_hash: HashOf<BridgedChain<T, I>>,
	) -> Option<HashOf<BridgedChain<T, I>>> {
		ImportedHeaders::<T, I>::get(header_hash).map(|h| h.state_root)
	}
}

/// Checks the given header for a consensus digest signaling a **standard** scheduled change and
/// extracts it.
pub(crate) fn find_scheduled_change<H: HeaderT>(
	header: &H,
) -> Option<sp_consensus_grandpa::ScheduledChange<H::Number>> {
	use sp_runtime::generic::OpaqueDigestItemId;

	let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID);

	let filter_log = |log: ConsensusLog<H::Number>| match log {
		ConsensusLog::ScheduledChange(change) => Some(change),
		_ => None,
	};

	// find the first consensus digest with the right ID which converts to
	// the right kind of consensus log.
	header.digest().convert_first(|l| l.try_to(id).and_then(filter_log))
}

/// Checks the given header for a consensus digest signaling a **forced** scheduled change and
/// extracts it.
pub(crate) fn find_forced_change<H: HeaderT>(
	header: &H,
) -> Option<(H::Number, sp_consensus_grandpa::ScheduledChange<H::Number>)> {
	use sp_runtime::generic::OpaqueDigestItemId;

	let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID);

	let filter_log = |log: ConsensusLog<H::Number>| match log {
		ConsensusLog::ForcedChange(delay, change) => Some((delay, change)),
		_ => None,
	};

	// find the first consensus digest with the right ID which converts to
	// the right kind of consensus log.
	header.digest().convert_first(|l| l.try_to(id).and_then(filter_log))
}

/// (Re)initialize bridge with given header for using it in `pallet-bridge-messages` benchmarks.
#[cfg(feature = "runtime-benchmarks")]
pub fn initialize_for_benchmarks<T: Config<I>, I: 'static>(header: BridgedHeader<T, I>) {
	initialize_bridge::<T, I>(InitializationData {
		header: Box::new(header),
		authority_list: sp_std::vec::Vec::new(), /* we don't verify any proofs in external
		                                          * benchmarks */
		set_id: 0,
		operating_mode: bp_runtime::BasicOperatingMode::Normal,
	})
	.expect("only used from benchmarks; benchmarks are correct; qed");
}

#[cfg(test)]
mod tests {
	use super::*;
	use crate::mock::{
		run_test, test_header, RuntimeOrigin, TestBridgedChain, TestHeader, TestNumber,
		TestRuntime, MAX_BRIDGED_AUTHORITIES,
	};
	use bp_header_chain::BridgeGrandpaCall;
	use bp_runtime::BasicOperatingMode;
	use bp_test_utils::{
		authority_list, generate_owned_bridge_module_tests, make_default_justification,
		make_justification_for_header, JustificationGeneratorParams, ALICE, BOB,
	};
	use codec::Encode;
	use frame_support::{
		assert_err, assert_noop, assert_ok, dispatch::PostDispatchInfo,
		storage::generator::StorageValue,
	};
	use sp_core::Get;
	use sp_runtime::{Digest, DigestItem, DispatchError};

	fn initialize_substrate_bridge() {
		assert_ok!(init_with_origin(RuntimeOrigin::root()));
	}

	fn init_with_origin(
		origin: RuntimeOrigin,
	) -> Result<
		InitializationData<TestHeader>,
		sp_runtime::DispatchErrorWithPostInfo<PostDispatchInfo>,
	> {
		let genesis = test_header(0);

		let init_data = InitializationData {
			header: Box::new(genesis),
			authority_list: authority_list(),
			set_id: 1,
			operating_mode: BasicOperatingMode::Normal,
		};

		Pallet::<TestRuntime>::initialize(origin, init_data.clone()).map(|_| init_data)
	}

	fn submit_finality_proof(header: u8) -> frame_support::dispatch::DispatchResultWithPostInfo {
		let header = test_header(header.into());
		let justification = make_default_justification(&header);

		Pallet::<TestRuntime>::submit_finality_proof(
			RuntimeOrigin::signed(1),
			Box::new(header),
			justification,
		)
	}

	fn next_block() {
		use frame_support::traits::OnInitialize;

		let current_number = frame_system::Pallet::<TestRuntime>::block_number();
		frame_system::Pallet::<TestRuntime>::set_block_number(current_number + 1);
		let _ = Pallet::<TestRuntime>::on_initialize(current_number);
	}

	fn change_log(delay: u64) -> Digest {
		let consensus_log =
			ConsensusLog::<TestNumber>::ScheduledChange(sp_consensus_grandpa::ScheduledChange {
				next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)],
				delay,
			});

		Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] }
	}

	fn forced_change_log(delay: u64) -> Digest {
		let consensus_log = ConsensusLog::<TestNumber>::ForcedChange(
			delay,
			sp_consensus_grandpa::ScheduledChange {
				next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)],
				delay,
			},
		);

		Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] }
	}

	fn many_authorities_log() -> Digest {
		let consensus_log =
			ConsensusLog::<TestNumber>::ScheduledChange(sp_consensus_grandpa::ScheduledChange {
				next_authorities: std::iter::repeat((ALICE.into(), 1))
					.take(MAX_BRIDGED_AUTHORITIES as usize + 1)
					.collect(),
				delay: 0,
			});

		Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] }
	}

	#[test]
	fn init_root_or_owner_origin_can_initialize_pallet() {
		run_test(|| {
			assert_noop!(init_with_origin(RuntimeOrigin::signed(1)), DispatchError::BadOrigin);
			assert_ok!(init_with_origin(RuntimeOrigin::root()));

			// Reset storage so we can initialize the pallet again
			BestFinalized::<TestRuntime>::kill();
			PalletOwner::<TestRuntime>::put(2);
			assert_ok!(init_with_origin(RuntimeOrigin::signed(2)));
		})
	}

	#[test]
	fn init_storage_entries_are_correctly_initialized() {
		run_test(|| {
			assert_eq!(BestFinalized::<TestRuntime>::get(), None,);
			assert_eq!(Pallet::<TestRuntime>::best_finalized(), None);

			let init_data = init_with_origin(RuntimeOrigin::root()).unwrap();

			assert!(<ImportedHeaders<TestRuntime>>::contains_key(init_data.header.hash()));
			assert_eq!(BestFinalized::<TestRuntime>::get().unwrap().1, init_data.header.hash());
			assert_eq!(
				CurrentAuthoritySet::<TestRuntime>::get().authorities,
				init_data.authority_list
			);
			assert_eq!(PalletOperatingMode::<TestRuntime>::get(), BasicOperatingMode::Normal);
		})
	}

	#[test]
	fn init_can_only_initialize_pallet_once() {
		run_test(|| {
			initialize_substrate_bridge();
			assert_noop!(
				init_with_origin(RuntimeOrigin::root()),
				<Error<TestRuntime>>::AlreadyInitialized
			);
		})
	}

	#[test]
	fn init_fails_if_there_are_too_many_authorities_in_the_set() {
		run_test(|| {
			let genesis = test_header(0);
			let init_data = InitializationData {
				header: Box::new(genesis),
				authority_list: std::iter::repeat(authority_list().remove(0))
					.take(MAX_BRIDGED_AUTHORITIES as usize + 1)
					.collect(),
				set_id: 1,
				operating_mode: BasicOperatingMode::Normal,
			};

			assert_noop!(
				Pallet::<TestRuntime>::initialize(RuntimeOrigin::root(), init_data),
				Error::<TestRuntime>::TooManyAuthoritiesInSet,
			);
		});
	}

	#[test]
	fn pallet_rejects_transactions_if_halted() {
		run_test(|| {
			initialize_substrate_bridge();

			assert_ok!(Pallet::<TestRuntime>::set_operating_mode(
				RuntimeOrigin::root(),
				BasicOperatingMode::Halted
			));
			assert_noop!(
				submit_finality_proof(1),
				Error::<TestRuntime>::BridgeModule(bp_runtime::OwnedBridgeModuleError::Halted)
			);

			assert_ok!(Pallet::<TestRuntime>::set_operating_mode(
				RuntimeOrigin::root(),
				BasicOperatingMode::Normal
			));
			assert_ok!(submit_finality_proof(1));
		})
	}

	#[test]
	fn pallet_rejects_header_if_not_initialized_yet() {
		run_test(|| {
			assert_noop!(submit_finality_proof(1), Error::<TestRuntime>::NotInitialized);
		});
	}

	#[test]
	fn successfully_imports_header_with_valid_finality() {
		run_test(|| {
			initialize_substrate_bridge();

			let header_number = 1;
			let header = test_header(header_number.into());
			let justification = make_default_justification(&header);

			let pre_dispatch_weight = <TestRuntime as Config>::WeightInfo::submit_finality_proof(
				justification.commit.precommits.len().try_into().unwrap_or(u32::MAX),
				justification.votes_ancestries.len().try_into().unwrap_or(u32::MAX),
			);

			let result = submit_finality_proof(header_number);
			assert_ok!(result);
			assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes);
			// our test config assumes 2048 max authorities and we are just using a couple
			let pre_dispatch_proof_size = pre_dispatch_weight.proof_size();

			let actual_proof_size = result.unwrap().actual_weight.unwrap().proof_size();
			assert!(actual_proof_size > 0);
			assert!(
				actual_proof_size < pre_dispatch_proof_size,
				"Actual proof size {actual_proof_size} must be less than the pre-dispatch {pre_dispatch_proof_size}",
			);

			let header = test_header(1);
			assert_eq!(<BestFinalized<TestRuntime>>::get().unwrap().1, header.hash());
			assert!(<ImportedHeaders<TestRuntime>>::contains_key(header.hash()));
		})
	}

	#[test]
	fn rejects_justification_that_skips_authority_set_transition() {
		run_test(|| {
			initialize_substrate_bridge();

			let header = test_header(1);

			let params =
				JustificationGeneratorParams::<TestHeader> { set_id: 2, ..Default::default() };
			let justification = make_justification_for_header(params);

			assert_err!(
				Pallet::<TestRuntime>::submit_finality_proof(
					RuntimeOrigin::signed(1),
					Box::new(header),
					justification,
				),
				<Error<TestRuntime>>::InvalidJustification
			);
		})
	}

	#[test]
	fn does_not_import_header_with_invalid_finality_proof() {
		run_test(|| {
			initialize_substrate_bridge();

			let header = test_header(1);
			let mut justification = make_default_justification(&header);
			justification.round = 42;

			assert_err!(
				Pallet::<TestRuntime>::submit_finality_proof(
					RuntimeOrigin::signed(1),
					Box::new(header),
					justification,
				),
				<Error<TestRuntime>>::InvalidJustification
			);
		})
	}

	#[test]
	fn disallows_invalid_authority_set() {
		run_test(|| {
			let genesis = test_header(0);
			let invalid_authority_list = vec![(ALICE.into(), u64::MAX), (BOB.into(), u64::MAX)];
			let init_data = InitializationData {
				header: Box::new(genesis),
				authority_list: invalid_authority_list,
				set_id: 1,
				operating_mode: BasicOperatingMode::Normal,
			};

			assert_ok!(Pallet::<TestRuntime>::initialize(RuntimeOrigin::root(), init_data));

			let header = test_header(1);
			let justification = make_default_justification(&header);

			assert_err!(
				Pallet::<TestRuntime>::submit_finality_proof(
					RuntimeOrigin::signed(1),
					Box::new(header),
					justification,
				),
				<Error<TestRuntime>>::InvalidAuthoritySet
			);
		})
	}

	#[test]
	fn importing_header_ensures_that_chain_is_extended() {
		run_test(|| {
			initialize_substrate_bridge();

			assert_ok!(submit_finality_proof(4));
			assert_err!(submit_finality_proof(3), Error::<TestRuntime>::OldHeader);
			assert_ok!(submit_finality_proof(5));
		})
	}

	#[test]
	fn importing_header_enacts_new_authority_set() {
		run_test(|| {
			initialize_substrate_bridge();

			let next_set_id = 2;
			let next_authorities = vec![(ALICE.into(), 1), (BOB.into(), 1)];

			// Need to update the header digest to indicate that our header signals an authority set
			// change. The change will be enacted when we import our header.
			let mut header = test_header(2);
			header.digest = change_log(0);

			// Create a valid justification for the header
			let justification = make_default_justification(&header);

			// Let's import our test header
			let result = Pallet::<TestRuntime>::submit_finality_proof(
				RuntimeOrigin::signed(1),
				Box::new(header.clone()),
				justification,
			);
			assert_ok!(result);
			assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::No);

			// Make sure that our header is the best finalized
			assert_eq!(<BestFinalized<TestRuntime>>::get().unwrap().1, header.hash());
			assert!(<ImportedHeaders<TestRuntime>>::contains_key(header.hash()));

			// Make sure that the authority set actually changed upon importing our header
			assert_eq!(
				<CurrentAuthoritySet<TestRuntime>>::get(),
				StoredAuthoritySet::<TestRuntime, ()>::try_new(next_authorities, next_set_id)
					.unwrap(),
			);
		})
	}

	#[test]
	fn relayer_pays_tx_fee_when_submitting_huge_mandatory_header() {
		run_test(|| {
			initialize_substrate_bridge();

			// let's prepare a huge authorities change header, which is definitely above size limits
			let mut header = test_header(2);
			header.digest = change_log(0);
			header.digest.push(DigestItem::Other(vec![42u8; 1024 * 1024]));
			let justification = make_default_justification(&header);

			// without large digest item ^^^ the relayer would have paid zero transaction fee
			// (`Pays::No`)
			let result = Pallet::<TestRuntime>::submit_finality_proof(
				RuntimeOrigin::signed(1),
				Box::new(header.clone()),
				justification,
			);
			assert_ok!(result);
			assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes);

			// Make sure that our header is the best finalized
			assert_eq!(<BestFinalized<TestRuntime>>::get().unwrap().1, header.hash());
			assert!(<ImportedHeaders<TestRuntime>>::contains_key(header.hash()));
		})
	}

	#[test]
	fn relayer_pays_tx_fee_when_submitting_justification_with_long_ancestry_votes() {
		run_test(|| {
			initialize_substrate_bridge();

			// let's prepare a huge authorities change header, which is definitely above weight
			// limits
			let mut header = test_header(2);
			header.digest = change_log(0);
			let justification = make_justification_for_header(JustificationGeneratorParams {
				header: header.clone(),
				ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY + 1,
				..Default::default()
			});

			// without many headers in votes ancestries ^^^ the relayer would have paid zero
			// transaction fee (`Pays::No`)
			let result = Pallet::<TestRuntime>::submit_finality_proof(
				RuntimeOrigin::signed(1),
				Box::new(header.clone()),
				justification,
			);
			assert_ok!(result);
			assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes);

			// Make sure that our header is the best finalized
			assert_eq!(<BestFinalized<TestRuntime>>::get().unwrap().1, header.hash());
			assert!(<ImportedHeaders<TestRuntime>>::contains_key(header.hash()));
		})
	}

	#[test]
	fn importing_header_rejects_header_with_scheduled_change_delay() {
		run_test(|| {
			initialize_substrate_bridge();

			// Need to update the header digest to indicate that our header signals an authority set
			// change. However, the change doesn't happen until the next block.
			let mut header = test_header(2);
			header.digest = change_log(1);

			// Create a valid justification for the header
			let justification = make_default_justification(&header);

			// Should not be allowed to import this header
			assert_err!(
				Pallet::<TestRuntime>::submit_finality_proof(
					RuntimeOrigin::signed(1),
					Box::new(header),
					justification
				),
				<Error<TestRuntime>>::UnsupportedScheduledChange
			);
		})
	}

	#[test]
	fn importing_header_rejects_header_with_forced_changes() {
		run_test(|| {
			initialize_substrate_bridge();

			// Need to update the header digest to indicate that it signals a forced authority set
			// change.
			let mut header = test_header(2);
			header.digest = forced_change_log(0);

			// Create a valid justification for the header
			let justification = make_default_justification(&header);

			// Should not be allowed to import this header
			assert_err!(
				Pallet::<TestRuntime>::submit_finality_proof(
					RuntimeOrigin::signed(1),
					Box::new(header),
					justification
				),
				<Error<TestRuntime>>::UnsupportedScheduledChange
			);
		})
	}

	#[test]
	fn importing_header_rejects_header_with_too_many_authorities() {
		run_test(|| {
			initialize_substrate_bridge();

			// Need to update the header digest to indicate that our header signals an authority set
			// change. However, the change doesn't happen until the next block.
			let mut header = test_header(2);
			header.digest = many_authorities_log();

			// Create a valid justification for the header
			let justification = make_default_justification(&header);

			// Should not be allowed to import this header
			assert_err!(
				Pallet::<TestRuntime>::submit_finality_proof(
					RuntimeOrigin::signed(1),
					Box::new(header),
					justification
				),
				<Error<TestRuntime>>::TooManyAuthoritiesInSet
			);
		});
	}

	#[test]
	fn parse_finalized_storage_proof_rejects_proof_on_unknown_header() {
		run_test(|| {
			assert_noop!(
				Pallet::<TestRuntime>::parse_finalized_storage_proof(
					Default::default(),
					vec![],
					|_| (),
				),
				bp_header_chain::HeaderChainError::UnknownHeader,
			);
		});
	}

	#[test]
	fn parse_finalized_storage_accepts_valid_proof() {
		run_test(|| {
			let (state_root, storage_proof) = bp_runtime::craft_valid_storage_proof();

			let mut header = test_header(2);
			header.set_state_root(state_root);

			let hash = header.hash();
			<BestFinalized<TestRuntime>>::put(HeaderId(2, hash));
			<ImportedHeaders<TestRuntime>>::insert(hash, header.build());

			assert_ok!(
				Pallet::<TestRuntime>::parse_finalized_storage_proof(hash, storage_proof, |_| (),),
				(),
			);
		});
	}

	#[test]
	fn rate_limiter_disallows_imports_once_limit_is_hit_in_single_block() {
		run_test(|| {
			initialize_substrate_bridge();

			assert_ok!(submit_finality_proof(1));
			assert_ok!(submit_finality_proof(2));
			assert_err!(submit_finality_proof(3), <Error<TestRuntime>>::TooManyRequests);
		})
	}

	#[test]
	fn rate_limiter_invalid_requests_do_not_count_towards_request_count() {
		run_test(|| {
			let submit_invalid_request = || {
				let header = test_header(1);
				let mut invalid_justification = make_default_justification(&header);
				invalid_justification.round = 42;

				Pallet::<TestRuntime>::submit_finality_proof(
					RuntimeOrigin::signed(1),
					Box::new(header),
					invalid_justification,
				)
			};

			initialize_substrate_bridge();

			for _ in 0..<TestRuntime as Config>::MaxRequests::get() + 1 {
				// Notice that the error here *isn't* `TooManyRequests`
				assert_err!(submit_invalid_request(), <Error<TestRuntime>>::InvalidJustification);
			}

			// Can still submit `MaxRequests` requests afterwards
			assert_ok!(submit_finality_proof(1));
			assert_ok!(submit_finality_proof(2));
			assert_err!(submit_finality_proof(3), <Error<TestRuntime>>::TooManyRequests);
		})
	}

	#[test]
	fn rate_limiter_allows_request_after_new_block_has_started() {
		run_test(|| {
			initialize_substrate_bridge();
			assert_ok!(submit_finality_proof(1));
			assert_ok!(submit_finality_proof(2));

			next_block();
			assert_ok!(submit_finality_proof(3));
		})
	}

	#[test]
	fn rate_limiter_disallows_imports_once_limit_is_hit_across_different_blocks() {
		run_test(|| {
			initialize_substrate_bridge();
			assert_ok!(submit_finality_proof(1));
			assert_ok!(submit_finality_proof(2));

			next_block();
			assert_ok!(submit_finality_proof(3));
			assert_err!(submit_finality_proof(4), <Error<TestRuntime>>::TooManyRequests);
		})
	}

	#[test]
	fn rate_limiter_allows_max_requests_after_long_time_with_no_activity() {
		run_test(|| {
			initialize_substrate_bridge();
			assert_ok!(submit_finality_proof(1));
			assert_ok!(submit_finality_proof(2));

			next_block();
			next_block();

			next_block();
			assert_ok!(submit_finality_proof(5));
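			// `RequestCount` has been drained back to zero by the empty blocks above, so a second
			// import within the same block still fits under `MaxRequests`.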
			assert_ok!(submit_finality_proof(7));
		})
	}

	#[test]
	fn should_prune_headers_over_headers_to_keep_parameter() {
		run_test(|| {
			initialize_substrate_bridge();
			assert_ok!(submit_finality_proof(1));
			let first_header_hash = Pallet::<TestRuntime>::best_finalized().unwrap().hash();
			next_block();

			assert_ok!(submit_finality_proof(2));
			next_block();
			assert_ok!(submit_finality_proof(3));
			next_block();
			assert_ok!(submit_finality_proof(4));
			next_block();
			assert_ok!(submit_finality_proof(5));
			next_block();

			assert_ok!(submit_finality_proof(6));

			assert!(
				!ImportedHeaders::<TestRuntime>::contains_key(first_header_hash),
				"First header should be pruned.",
			);
		})
	}

	#[test]
	fn storage_keys_computed_properly() {
		assert_eq!(
			PalletOperatingMode::<TestRuntime>::storage_value_final_key().to_vec(),
			bp_header_chain::storage_keys::pallet_operating_mode_key("Grandpa").0,
		);

		assert_eq!(
			CurrentAuthoritySet::<TestRuntime>::storage_value_final_key().to_vec(),
			bp_header_chain::storage_keys::current_authority_set_key("Grandpa").0,
		);

		assert_eq!(
			BestFinalized::<TestRuntime>::storage_value_final_key().to_vec(),
			bp_header_chain::storage_keys::best_finalized_key("Grandpa").0,
		);
	}

	#[test]
	fn test_bridge_grandpa_call_is_correctly_defined() {
		let header = test_header(0);
		let init_data = InitializationData {
			header: Box::new(header.clone()),
			authority_list: authority_list(),
			set_id: 1,
			operating_mode: BasicOperatingMode::Normal,
		};
		let justification = make_default_justification(&header);

		let direct_initialize_call =
			Call::<TestRuntime>::initialize { init_data: init_data.clone() };
		let indirect_initialize_call = BridgeGrandpaCall::<TestHeader>::initialize { init_data };
		assert_eq!(direct_initialize_call.encode(), indirect_initialize_call.encode());

		let direct_submit_finality_proof_call = Call::<TestRuntime>::submit_finality_proof {
			finality_target: Box::new(header.clone()),
			justification: justification.clone(),
		};
		let indirect_submit_finality_proof_call =
			BridgeGrandpaCall::<TestHeader>::submit_finality_proof {
				finality_target: Box::new(header),
				justification,
			};
		assert_eq!(
			direct_submit_finality_proof_call.encode(),
			indirect_submit_finality_proof_call.encode()
		);
	}

	generate_owned_bridge_module_tests!(BasicOperatingMode::Normal, BasicOperatingMode::Halted);

	#[test]
	fn maybe_headers_to_keep_returns_correct_value() {
		assert_eq!(MaybeHeadersToKeep::<TestRuntime, ()>::get(), Some(mock::HeadersToKeep::get()));
	}
}