// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.

// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.

//! Parachains finality module.
//!
//! This module needs to be deployed with the GRANDPA module, which is syncing relay
//! chain blocks. The main entry point of this module is `submit_parachain_heads`, which
//! accepts a storage proof of some parachain `Heads` entries from the bridged relay chain.
//! It requires the corresponding relay headers to be already synced.
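//!
//! As a rough sketch of the expected flow (the `BridgeParachains` pallet name and the
//! parachain id below are illustrative only, they are not defined by this crate):
//!
//! ```ignore
//! // 1) the relayer submits a finality proof for a relay chain block to the bridge
//! //    GRANDPA pallet;
//! // 2) the relayer then proves the parachain `Heads` entries at that relay chain block:
//! BridgeParachains::submit_parachain_heads(
//! 	origin,
//! 	(relay_block_number, relay_block_hash),
//! 	vec![(ParaId(2000), parachain_head_hash)],
//! 	parachain_heads_proof,
//! )
//! ```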

#![cfg_attr(not(feature = "std"), no_std)]

pub use weights::WeightInfo;
pub use weights_ext::WeightInfoExt;

use bp_header_chain::{HeaderChain, HeaderChainError};
use bp_parachains::{parachain_head_storage_key_at_source, ParaInfo, ParaStoredHeaderData};
use bp_polkadot_core::parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId};
use bp_runtime::{Chain, HashOf, HeaderId, HeaderIdOf, Parachain, StorageProofError};
use frame_support::dispatch::PostDispatchInfo;
use sp_std::{marker::PhantomData, vec::Vec};

#[cfg(feature = "runtime-benchmarks")]
use bp_parachains::ParaStoredHeaderDataBuilder;
#[cfg(feature = "runtime-benchmarks")]
use bp_runtime::HeaderOf;
#[cfg(feature = "runtime-benchmarks")]
use codec::Encode;

// Re-export in crate namespace for `construct_runtime!`.
pub use call_ext::*;
pub use pallet::*;

pub mod weights;
pub mod weights_ext;

#[cfg(feature = "runtime-benchmarks")]
pub mod benchmarking;

mod call_ext;
#[cfg(test)]
mod mock;

/// The target that will be used when publishing logs related to this pallet.
pub const LOG_TARGET: &str = "runtime::bridge-parachains";

/// Block hash of the bridged relay chain.
pub type RelayBlockHash = bp_polkadot_core::Hash;
/// Block number of the bridged relay chain.
pub type RelayBlockNumber = bp_polkadot_core::BlockNumber;
/// Hasher of the bridged relay chain.
pub type RelayBlockHasher = bp_polkadot_core::Hasher;

/// Artifacts of the parachains head update.
struct UpdateParachainHeadArtifacts {
	/// New best head of the parachain.
	pub best_head: ParaInfo,
	/// If `true`, some old parachain head has been pruned during update.
	pub prune_happened: bool,
}

#[frame_support::pallet]
pub mod pallet {
	use super::*;
	use bp_parachains::{
		BestParaHeadHash, ImportedParaHeadsKeyProvider, ParaStoredHeaderDataBuilder,
		ParasInfoKeyProvider,
	};
	use bp_runtime::{
		BasicOperatingMode, BoundedStorageValue, OwnedBridgeModule, StorageDoubleMapKeyProvider,
		StorageMapKeyProvider,
	};
	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;

	/// Stored parachain head data of given parachains pallet.
	pub type StoredParaHeadDataOf<T, I> =
		BoundedStorageValue<<T as Config<I>>::MaxParaHeadDataSize, ParaStoredHeaderData>;
	/// Weight info of the given parachains pallet.
	pub type WeightInfoOf<T, I> = <T as Config<I>>::WeightInfo;

	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config<I>, I: 'static = ()> {
		/// The caller has provided a head of a parachain that the pallet is not configured to
		/// track.
		UntrackedParachainRejected { parachain: ParaId },
		/// The caller has declared that it has provided the given parachain head, but it is
		/// missing from the storage proof.
		MissingParachainHead { parachain: ParaId },
		/// The caller has provided a parachain head hash that does not match the hash read from
		/// the storage proof.
		IncorrectParachainHeadHash {
			parachain: ParaId,
			parachain_head_hash: ParaHash,
			actual_parachain_head_hash: ParaHash,
		},
		/// The caller has provided an obsolete parachain head, which is already known to the
		/// pallet.
		RejectedObsoleteParachainHead { parachain: ParaId, parachain_head_hash: ParaHash },
		/// The caller has provided a parachain head that exceeds the maximal configured head
		/// size.
		RejectedLargeParachainHead {
			parachain: ParaId,
			parachain_head_hash: ParaHash,
			parachain_head_size: u32,
		},
		/// Parachain head has been updated.
		UpdatedParachainHead { parachain: ParaId, parachain_head_hash: ParaHash },
	}

	#[pallet::error]
	pub enum Error<T, I = ()> {
		/// Relay chain block hash is unknown to us.
		UnknownRelayChainBlock,
		/// The number of the stored relay block is different from what the relayer has provided.
		InvalidRelayChainBlockNumber,
		/// Error generated by a method defined in `bp-header-chain`.
		HeaderChain(HeaderChainError),
		/// Given parachain head is unknown.
		UnknownParaHead,
		/// The storage proof doesn't contain a storage root, so it is invalid for the given
		/// header.
		StorageRootMismatch,
		/// Failed to extract state root from given parachain head.
		FailedToExtractStateRoot,
		/// Error generated by the `OwnedBridgeModule` trait.
		BridgeModule(bp_runtime::OwnedBridgeModuleError),
	}

	/// Convenience trait for defining `BridgedChain` bounds.
	pub trait BoundedBridgeGrandpaConfig<I: 'static>: pallet_bridge_grandpa::Config<I> {
		type BridgedRelayChain: Chain<
			BlockNumber = RelayBlockNumber,
			Hash = RelayBlockHash,
			Hasher = RelayBlockHasher,
		>;
	}

	impl<T, I: 'static> BoundedBridgeGrandpaConfig<I> for T
	where
		T: pallet_bridge_grandpa::Config<I>,
		T::BridgedChain:
			Chain<BlockNumber = RelayBlockNumber, Hash = RelayBlockHash, Hasher = RelayBlockHasher>,
	{
		type BridgedRelayChain = T::BridgedChain;
	}

	#[pallet::config]
	#[pallet::disable_frame_system_supertrait_check]
	pub trait Config<I: 'static = ()>:
		BoundedBridgeGrandpaConfig<Self::BridgesGrandpaPalletInstance>
	{
		/// The overarching event type.
		type RuntimeEvent: From<Event<Self, I>>
			+ IsType<<Self as frame_system::Config>::RuntimeEvent>;

		/// Benchmark results from the runtime we're plugged into.
		type WeightInfo: WeightInfoExt;

		/// Instance of the bridges GRANDPA pallet (within this runtime) that this pallet is
		/// linked to.
		///
		/// The GRANDPA pallet instance must be configured to import headers of the relay chain
		/// that we're interested in.
		type BridgesGrandpaPalletInstance: 'static;

		/// Name of the original `paras` pallet in the `construct_runtime!()` call at the bridged
		/// chain.
		///
		/// Please keep in mind that this should be the name of the `runtime_parachains::paras`
		/// pallet from the polkadot repository, not the `pallet-bridge-parachains`.
		#[pallet::constant]
		type ParasPalletName: Get<&'static str>;

		/// Parachain head data builder.
		///
		/// We never store parachain heads here, since they may be too big (e.g. because of large
		/// digest items). Instead we're using the same approach as the `pallet-bridge-grandpa`
		/// pallet - we are only storing `bp_messages::StoredHeaderData` (number and state root),
		/// which is enough for our applications. However, we work with different parachains here
		/// and they can use different primitives (for block numbers and hash). So we can't store
		/// it directly. Instead, we're storing `bp_messages::StoredHeaderData` in SCALE-encoded
		/// form, wrapping it into `bp_parachains::ParaStoredHeaderData`.
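		///
		/// As an illustration, a runtime tracking two parachains could plug a tuple of
		/// `SingleParaStoredHeaderDataBuilder` entries here (the chain types below are
		/// assumptions for the example only):
		///
		/// ```ignore
		/// type ParaStoredHeaderDataBuilder = (
		/// 	SingleParaStoredHeaderDataBuilder<ParachainA>,
		/// 	SingleParaStoredHeaderDataBuilder<ParachainB>,
		/// );
		/// ```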
		///
		/// This builder helps to convert from `HeadData` to `bp_parachains::ParaStoredHeaderData`.
		type ParaStoredHeaderDataBuilder: ParaStoredHeaderDataBuilder;

		/// Maximal number of single parachain heads to keep in the storage.
		///
		/// The setting is there to prevent growing the on-chain state indefinitely. Note that
		/// the setting does not relate to parachain block numbers - we will simply keep that many
		/// items in the storage, so it doesn't guarantee any fixed timeframe for heads.
		///
		/// Incautious change of this constant may lead to orphan entries in the runtime storage.
		#[pallet::constant]
		type HeadsToKeep: Get<u32>;

		/// Maximal size (in bytes) of the SCALE-encoded parachain head data
		/// (`bp_parachains::ParaStoredHeaderData`).
		///
		/// Keep in mind that the size of any tracked parachain header data must not exceed this
		/// value. So if you're going to track multiple parachains, one of which is using large
		/// hashes, you should choose this value to fit the largest of them.
		///
		/// There are no mandatory headers in this pallet, so it can't stall if some header
		/// exceeds this bound.
		#[pallet::constant]
		type MaxParaHeadDataSize: Get<u32>;
	}
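
	// A minimal sketch of how a runtime could implement the configuration above. The runtime,
	// instance, parachain and constant values below are assumptions for illustration only;
	// `BridgeWeight` is assumed to be the benchmarked `WeightInfo` implementation from the
	// `weights` module of this crate.
	//
	// parameter_types! {
	// 	pub const ParasPalletName: &'static str = "Paras";
	// }
	//
	// impl pallet_bridge_parachains::Config for Runtime {
	// 	type RuntimeEvent = RuntimeEvent;
	// 	type WeightInfo = pallet_bridge_parachains::weights::BridgeWeight<Runtime>;
	// 	type BridgesGrandpaPalletInstance = BridgeGrandpaInstance;
	// 	type ParasPalletName = ParasPalletName;
	// 	type ParaStoredHeaderDataBuilder = SingleParaStoredHeaderDataBuilder<ParachainA>;
	// 	type HeadsToKeep = ConstU32<1024>;
	// 	type MaxParaHeadDataSize = ConstU32<1024>;
	// }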

	/// Optional pallet owner.
	///
	/// Pallet owner has a right to halt all pallet operations and then resume them. If it is
	/// `None`, then there are no direct ways to halt/resume pallet operations, but other
	/// runtime methods may still be used to do that (e.g. democracy::referendum to update the
	/// halt flag directly or to call `set_operating_mode`).
	#[pallet::storage]
	pub type PalletOwner<T: Config<I>, I: 'static = ()> =
		StorageValue<_, T::AccountId, OptionQuery>;

	/// The current operating mode of the pallet.
	///
	/// Depending on the mode either all, or no transactions will be allowed.
	#[pallet::storage]
	pub type PalletOperatingMode<T: Config<I>, I: 'static = ()> =
		StorageValue<_, BasicOperatingMode, ValueQuery>;

	/// Parachains info.
	///
	/// Contains the following info:
	/// - best parachain head hash
	/// - the head of the `ImportedParaHashes` ring buffer
	#[pallet::storage]
	pub type ParasInfo<T: Config<I>, I: 'static = ()> = StorageMap<
		Hasher = <ParasInfoKeyProvider as StorageMapKeyProvider>::Hasher,
		Key = <ParasInfoKeyProvider as StorageMapKeyProvider>::Key,
		Value = <ParasInfoKeyProvider as StorageMapKeyProvider>::Value,
		QueryKind = OptionQuery,
		OnEmpty = GetDefault,
		MaxValues = MaybeMaxParachains<T, I>,
	>;

	/// State roots of parachain heads which have been imported into the pallet.
	#[pallet::storage]
	pub type ImportedParaHeads<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
		Hasher1 = <ImportedParaHeadsKeyProvider as StorageDoubleMapKeyProvider>::Hasher1,
		Key1 = <ImportedParaHeadsKeyProvider as StorageDoubleMapKeyProvider>::Key1,
		Hasher2 = <ImportedParaHeadsKeyProvider as StorageDoubleMapKeyProvider>::Hasher2,
		Key2 = <ImportedParaHeadsKeyProvider as StorageDoubleMapKeyProvider>::Key2,
		Value = StoredParaHeadDataOf<T, I>,
		QueryKind = OptionQuery,
		OnEmpty = GetDefault,
		MaxValues = MaybeMaxTotalParachainHashes<T, I>,
	>;

	/// A ring buffer of imported parachain head hashes. Ordered by the insertion time.
	#[pallet::storage]
	pub(super) type ImportedParaHashes<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
		Hasher1 = Blake2_128Concat,
		Key1 = ParaId,
		Hasher2 = Twox64Concat,
		Key2 = u32,
		Value = ParaHash,
		QueryKind = OptionQuery,
		OnEmpty = GetDefault,
		MaxValues = MaybeMaxTotalParachainHashes<T, I>,
	>;

	#[pallet::pallet]
	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);

	impl<T: Config<I>, I: 'static> OwnedBridgeModule<T> for Pallet<T, I> {
		const LOG_TARGET: &'static str = LOG_TARGET;
		type OwnerStorage = PalletOwner<T, I>;
		type OperatingMode = BasicOperatingMode;
		type OperatingModeStorage = PalletOperatingMode<T, I>;
	}

	#[pallet::call]
	impl<T: Config<I>, I: 'static> Pallet<T, I> {
		/// Submit proof of one or several parachain heads.
		///
		/// The proof is supposed to be a proof of some `Heads` entries from the
		/// `polkadot-runtime-parachains::paras` pallet instance, deployed at the bridged chain.
		/// The proof is supposed to be crafted at the relay chain block referenced by
		/// `at_relay_block`, which must already be imported by the corresponding GRANDPA pallet
		/// at this chain.
		#[pallet::call_index(0)]
		#[pallet::weight(WeightInfoOf::<T, I>::submit_parachain_heads_weight(
			T::DbWeight::get(),
			parachain_heads_proof,
			parachains.len() as _,
		))]
		pub fn submit_parachain_heads(
			_origin: OriginFor<T>,
			at_relay_block: (RelayBlockNumber, RelayBlockHash),
			parachains: Vec<(ParaId, ParaHash)>,
			parachain_heads_proof: ParaHeadsProof,
		) -> DispatchResultWithPostInfo {
			Self::ensure_not_halted().map_err(Error::<T, I>::BridgeModule)?;

			// we'll need the relay chain header to verify that parachain heads are always
			// increasing
			let (relay_block_number, relay_block_hash) = at_relay_block;
			let relay_block = pallet_bridge_grandpa::ImportedHeaders::<
				T,
				T::BridgesGrandpaPalletInstance,
			>::get(relay_block_hash)
			.ok_or(Error::<T, I>::UnknownRelayChainBlock)?;
			ensure!(
				relay_block.number == relay_block_number,
				Error::<T, I>::InvalidRelayChainBlockNumber,
			);

			// now parse the storage proof and read parachain heads
			let mut actual_weight = WeightInfoOf::<T, I>::submit_parachain_heads_weight(
				T::DbWeight::get(),
				&parachain_heads_proof,
				parachains.len() as _,
			);

			pallet_bridge_grandpa::Pallet::<T, T::BridgesGrandpaPalletInstance>::parse_finalized_storage_proof(
				relay_block_hash,
				parachain_heads_proof.0,
				move |mut storage| {
					for (parachain, parachain_head_hash) in parachains {
						let parachain_head = match Pallet::<T, I>::read_parachain_head(
							&mut storage,
							parachain,
						) {
							Ok(Some(parachain_head)) => parachain_head,
							Ok(None) => {
								log::trace!(
									target: LOG_TARGET,
									"The head of parachain {:?} is None. {}",
									parachain,
									if ParasInfo::<T, I>::contains_key(parachain) {
										"Looks like it has been deregistered from the source relay chain"
									} else {
										"Looks like it is not yet registered at the source relay chain"
									},
								);
								Self::deposit_event(Event::MissingParachainHead { parachain });
								continue
							},
							Err(e) => {
								log::trace!(
									target: LOG_TARGET,
									"The read of head of parachain {:?} has failed: {:?}",
									parachain,
									e,
								);
								Self::deposit_event(Event::MissingParachainHead { parachain });
								continue
							},
						};

						// if the relayer has specified an invalid parachain head hash, ignore
						// the head (this isn't strictly necessary, but better safe than sorry)
						let actual_parachain_head_hash = parachain_head.hash();
						if parachain_head_hash != actual_parachain_head_hash {
							log::trace!(
								target: LOG_TARGET,
								"The submitter has specified invalid parachain {:?} head hash: {:?} vs {:?}",
								parachain,
								parachain_head_hash,
								actual_parachain_head_hash,
							);
							Self::deposit_event(Event::IncorrectParachainHeadHash {
								parachain,
								parachain_head_hash,
								actual_parachain_head_hash,
							});
							continue
						}

						// convert from parachain head into stored parachain head data
						let parachain_head_data = match T::ParaStoredHeaderDataBuilder::try_build(
							parachain,
							&parachain_head,
						) {
							Some(parachain_head_data) => parachain_head_data,
							None => {
								log::trace!(
									target: LOG_TARGET,
									"The head of parachain {:?} has been provided, but it is not tracked by the pallet",
									parachain,
								);
								Self::deposit_event(Event::UntrackedParachainRejected { parachain });
								continue
							},
						};

						let update_result: Result<_, ()> =
							ParasInfo::<T, I>::try_mutate(parachain, |stored_best_head| {
								let artifacts = Pallet::<T, I>::update_parachain_head(
									parachain,
									stored_best_head.take(),
									relay_block_number,
									parachain_head_data,
									parachain_head_hash,
								)?;
								*stored_best_head = Some(artifacts.best_head);
								Ok(artifacts.prune_happened)
							});

						// we're refunding weight if the update has not happened and if the
						// pruning has not happened
						let is_update_happened = matches!(update_result, Ok(_));
						if !is_update_happened {
							actual_weight = actual_weight.saturating_sub(
								WeightInfoOf::<T, I>::parachain_head_storage_write_weight(
									T::DbWeight::get(),
								),
							);
						}
						let is_prune_happened = matches!(update_result, Ok(true));
						if !is_prune_happened {
							actual_weight = actual_weight.saturating_sub(
								WeightInfoOf::<T, I>::parachain_head_pruning_weight(
									T::DbWeight::get(),
								),
							);
						}
					}

					// even though we may have accepted some parachain heads, we can't allow
					// relayers to submit proof with unused trie nodes
					// => treat this as an error
					//
					// (we can throw error here, because now all our calls are transactional)
					storage.ensure_no_unused_nodes()
				},
			)
			.and_then(|r| r.map_err(HeaderChainError::StorageProof))
			.map_err(|e| {
				log::trace!(target: LOG_TARGET, "Parachain heads storage proof is invalid: {:?}", e);
				Error::<T, I>::HeaderChain(e)
			})?;

			Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes })
		}

		/// Change `PalletOwner`.
		///
		/// May only be called either by root, or by `PalletOwner`.
		#[pallet::call_index(1)]
		#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
		pub fn set_owner(origin: OriginFor<T>, new_owner: Option<T::AccountId>) -> DispatchResult {
			<Self as OwnedBridgeModule<_>>::set_owner(origin, new_owner)
		}

		/// Halt or resume all pallet operations.
		///
		/// May only be called either by root, or by `PalletOwner`.
		#[pallet::call_index(2)]
		#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
		pub fn set_operating_mode(
			origin: OriginFor<T>,
			operating_mode: BasicOperatingMode,
		) -> DispatchResult {
			<Self as OwnedBridgeModule<_>>::set_operating_mode(origin, operating_mode)
		}
	}

	impl<T: Config<I>, I: 'static> Pallet<T, I> {
		/// Get stored parachain info.
		pub fn best_parachain_info(parachain: ParaId) -> Option<ParaInfo> {
			ParasInfo::<T, I>::get(parachain)
		}

		/// Get best finalized head data of the given parachain.
		pub fn best_parachain_head(parachain: ParaId) -> Option<ParaStoredHeaderData> {
			let best_para_head_hash = ParasInfo::<T, I>::get(parachain)?.best_head_hash.head_hash;
			ImportedParaHeads::<T, I>::get(parachain, best_para_head_hash).map(|h| h.into_inner())
		}

		/// Get best finalized head hash of the given parachain.
		pub fn best_parachain_head_hash(parachain: ParaId) -> Option<ParaHash> {
			Some(ParasInfo::<T, I>::get(parachain)?.best_head_hash.head_hash)
		}

		/// Get best finalized head id of the given parachain.
		pub fn best_parachain_head_id<C: Chain<Hash = ParaHash> + Parachain>(
		) -> Result<Option<HeaderIdOf<C>>, codec::Error> {
			let parachain = ParaId(C::PARACHAIN_ID);
			let best_head_hash = match Self::best_parachain_head_hash(parachain) {
				Some(best_head_hash) => best_head_hash,
				None => return Ok(None),
			};
			let encoded_head = match Self::parachain_head(parachain, best_head_hash) {
				Some(encoded_head) => encoded_head,
				None => return Ok(None),
			};
			encoded_head
				.decode_parachain_head_data::<C>()
				.map(|data| Some(HeaderId(data.number, best_head_hash)))
		}

		/// Get parachain head data with given hash.
		pub fn parachain_head(parachain: ParaId, hash: ParaHash) -> Option<ParaStoredHeaderData> {
			ImportedParaHeads::<T, I>::get(parachain, hash).map(|h| h.into_inner())
		}

		/// Read parachain head from storage proof.
		fn read_parachain_head(
			storage: &mut bp_runtime::StorageProofChecker<RelayBlockHasher>,
			parachain: ParaId,
		) -> Result<Option<ParaHead>, StorageProofError> {
			let parachain_head_key =
				parachain_head_storage_key_at_source(T::ParasPalletName::get(), parachain);
			storage.read_and_decode_value(parachain_head_key.0.as_ref())
		}

		/// Try to update parachain head.
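		///
		/// Heads are kept in a per-parachain ring buffer of `HeadsToKeep` entries, so importing
		/// a new head may prune the oldest stored one. A sketch of the position arithmetic used
		/// below, assuming `HeadsToKeep = 3` (an illustrative value only):
		///
		/// ```ignore
		/// // the write position advances modulo `HeadsToKeep`...
		/// assert_eq!((2 + 1) % 3, 0);
		/// // ...so the 4th imported head lands on position 0 and the head hash that was
		/// // stored there is pruned.
		/// ```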
		pub(super) fn update_parachain_head(
			parachain: ParaId,
			stored_best_head: Option<ParaInfo>,
			new_at_relay_block_number: RelayBlockNumber,
			new_head_data: ParaStoredHeaderData,
			new_head_hash: ParaHash,
		) -> Result<UpdateParachainHeadArtifacts, ()> {
			// check if the head has already been updated at a better relay chain block. Without
			// this check, we may import heads in random order
			let update = SubmitParachainHeadsInfo {
				at_relay_block_number: new_at_relay_block_number,
				para_id: parachain,
				para_head_hash: new_head_hash,
			};
			if SubmitParachainHeadsHelper::<T, I>::is_obsolete(&update) {
				Self::deposit_event(Event::RejectedObsoleteParachainHead {
					parachain,
					parachain_head_hash: new_head_hash,
				});
				return Err(())
			}

			// verify that the parachain head data size is <= `MaxParaHeadDataSize`
			let updated_head_data =
				match StoredParaHeadDataOf::<T, I>::try_from_inner(new_head_data) {
					Ok(updated_head_data) => updated_head_data,
					Err(e) => {
						log::trace!(
							target: LOG_TARGET,
							"The parachain head can't be updated. The parachain head data size \
							for {:?} is {}. It exceeds maximal configured size {}.",
							parachain,
							e.value_size,
							e.maximal_size,
						);

						Self::deposit_event(Event::RejectedLargeParachainHead {
							parachain,
							parachain_head_hash: new_head_hash,
							parachain_head_size: e.value_size as _,
						});

						return Err(())
					},
				};

			let next_imported_hash_position = stored_best_head
				.map_or(0, |stored_best_head| stored_best_head.next_imported_hash_position);

			// insert updated best parachain head
			let head_hash_to_prune =
				ImportedParaHashes::<T, I>::try_get(parachain, next_imported_hash_position);
			let updated_best_para_head = ParaInfo {
				best_head_hash: BestParaHeadHash {
					at_relay_block_number: new_at_relay_block_number,
					head_hash: new_head_hash,
				},
				next_imported_hash_position: (next_imported_hash_position + 1) %
					T::HeadsToKeep::get(),
			};
			ImportedParaHashes::<T, I>::insert(
				parachain,
				next_imported_hash_position,
				new_head_hash,
			);
			ImportedParaHeads::<T, I>::insert(parachain, new_head_hash, updated_head_data);
			log::trace!(
				target: LOG_TARGET,
				"Updated head of parachain {:?} to {}",
				parachain,
				new_head_hash,
			);

			// remove old head
			let prune_happened = head_hash_to_prune.is_ok();
			if let Ok(head_hash_to_prune) = head_hash_to_prune {
				log::trace!(
					target: LOG_TARGET,
					"Pruning old head of parachain {:?}: {}",
					parachain,
					head_hash_to_prune,
				);
				ImportedParaHeads::<T, I>::remove(parachain, head_hash_to_prune);
			}

			Self::deposit_event(Event::UpdatedParachainHead {
				parachain,
				parachain_head_hash: new_head_hash,
			});

			Ok(UpdateParachainHeadArtifacts { best_head: updated_best_para_head, prune_happened })
		}
	}

	#[pallet::genesis_config]
	pub struct GenesisConfig<T: Config<I>, I: 'static = ()> {
		/// Initial pallet operating mode.
		pub operating_mode: BasicOperatingMode,
		/// Initial pallet owner.
		pub owner: Option<T::AccountId>,
		/// Dummy marker.
		pub phantom: sp_std::marker::PhantomData<I>,
	}

	#[cfg(feature = "std")]
	impl<T: Config<I>, I: 'static> Default for GenesisConfig<T, I> {
		fn default() -> Self {
			Self {
				operating_mode: Default::default(),
				owner: Default::default(),
				phantom: Default::default(),
			}
		}
	}

	#[pallet::genesis_build]
	impl<T: Config<I>, I: 'static> GenesisBuild<T, I> for GenesisConfig<T, I> {
		fn build(&self) {
			PalletOperatingMode::<T, I>::put(self.operating_mode);
			if let Some(ref owner) = self.owner {
				PalletOwner::<T, I>::put(owner);
			}
		}
	}

	/// Returns maximal number of parachains, supported by the pallet.
	pub struct MaybeMaxParachains<T, I>(PhantomData<(T, I)>);

	impl<T: Config<I>, I: 'static> Get<Option<u32>> for MaybeMaxParachains<T, I> {
		fn get() -> Option<u32> {
			Some(T::ParaStoredHeaderDataBuilder::supported_parachains())
		}
	}

	/// Returns total number of all parachains hashes/heads, stored by the pallet.
	pub struct MaybeMaxTotalParachainHashes<T, I>(PhantomData<(T, I)>);

	impl<T: Config<I>, I: 'static> Get<Option<u32>> for MaybeMaxTotalParachainHashes<T, I> {
		fn get() -> Option<u32> {
			Some(
				T::ParaStoredHeaderDataBuilder::supported_parachains()
					.saturating_mul(T::HeadsToKeep::get()),
			)
		}
	}
}

/// Single parachain header chain adapter.
pub struct ParachainHeaders(PhantomData<(T, I, C)>); impl, I: 'static, C: Parachain> HeaderChain for ParachainHeaders { fn finalized_header_state_root(hash: HashOf) -> Option> { Pallet::::parachain_head(ParaId(C::PARACHAIN_ID), hash) .and_then(|head| head.decode_parachain_head_data::().ok()) .map(|h| h.state_root) } } /// (Re)initialize pallet with given header for using it in `pallet-bridge-messages` benchmarks. #[cfg(feature = "runtime-benchmarks")] pub fn initialize_for_benchmarks, I: 'static, PC: Parachain>( header: HeaderOf, ) { let parachain = ParaId(PC::PARACHAIN_ID); let parachain_head = ParaHead(header.encode()); let updated_head_data = T::ParaStoredHeaderDataBuilder::try_build(parachain, ¶chain_head) .expect("failed to build stored parachain head in benchmarks"); Pallet::::update_parachain_head( parachain, None, 0, updated_head_data, parachain_head.hash(), ) .expect("failed to insert parachain head in benchmarks"); } #[cfg(test)] mod tests { use super::*; use crate::mock::{ run_test, test_relay_header, BigParachainHeader, RegularParachainHasher, RegularParachainHeader, RuntimeEvent as TestEvent, RuntimeOrigin, TestRuntime, PARAS_PALLET_NAME, UNTRACKED_PARACHAIN_ID, }; use codec::Encode; use bp_parachains::{ BestParaHeadHash, BridgeParachainCall, ImportedParaHeadsKeyProvider, ParasInfoKeyProvider, }; use bp_runtime::{ record_all_trie_keys, BasicOperatingMode, OwnedBridgeModuleError, StorageDoubleMapKeyProvider, StorageMapKeyProvider, }; use bp_test_utils::{ authority_list, generate_owned_bridge_module_tests, make_default_justification, }; use frame_support::{ assert_noop, assert_ok, dispatch::DispatchResultWithPostInfo, storage::generator::{StorageDoubleMap, StorageMap}, traits::{Get, OnInitialize}, weights::Weight, }; use frame_system::{EventRecord, Pallet as System, Phase}; use sp_core::Hasher; use sp_runtime::{traits::Header as HeaderT, DispatchError}; use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, TrieMut}; type BridgesGrandpaPalletInstance = pallet_bridge_grandpa::Instance1; type WeightInfo = ::WeightInfo; type DbWeight = ::DbWeight; fn initialize(state_root: RelayBlockHash) { pallet_bridge_grandpa::Pallet::::initialize( RuntimeOrigin::root(), bp_header_chain::InitializationData { header: Box::new(test_relay_header(0, state_root)), authority_list: authority_list(), set_id: 1, operating_mode: BasicOperatingMode::Normal, }, ) .unwrap(); System::::set_block_number(1); System::::reset_events(); } fn proceed(num: RelayBlockNumber, state_root: RelayBlockHash) -> ParaHash { pallet_bridge_grandpa::Pallet::::on_initialize( 0, ); let header = test_relay_header(num, state_root); let hash = header.hash(); let justification = make_default_justification(&header); assert_ok!( pallet_bridge_grandpa::Pallet::::submit_finality_proof( RuntimeOrigin::signed(1), Box::new(header), justification, ) ); hash } fn prepare_parachain_heads_proof( heads: Vec<(u32, ParaHead)>, ) -> (RelayBlockHash, ParaHeadsProof, Vec<(ParaId, ParaHash)>) { let mut parachains = Vec::with_capacity(heads.len()); let mut root = Default::default(); let mut mdb = MemoryDB::default(); { let mut trie = TrieDBMutBuilderV1::::new(&mut mdb, &mut root).build(); for (parachain, head) in heads { let storage_key = parachain_head_storage_key_at_source(PARAS_PALLET_NAME, ParaId(parachain)); trie.insert(&storage_key.0, &head.encode()) .map_err(|_| "TrieMut::insert has failed") .expect("TrieMut::insert should not fail in tests"); parachains.push((ParaId(parachain), head.hash())); } } // generate storage proof to be 
delivered to This chain let storage_proof = record_all_trie_keys::, _>(&mdb, &root) .map_err(|_| "record_all_trie_keys has failed") .expect("record_all_trie_keys should not fail in benchmarks"); (root, ParaHeadsProof(storage_proof), parachains) } fn initial_best_head(parachain: u32) -> ParaInfo { ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: 0, head_hash: head_data(parachain, 0).hash(), }, next_imported_hash_position: 1, } } fn head_data(parachain: u32, head_number: u32) -> ParaHead { ParaHead( RegularParachainHeader::new( head_number as _, Default::default(), RegularParachainHasher::hash(&(parachain, head_number).encode()), Default::default(), Default::default(), ) .encode(), ) } fn stored_head_data(parachain: u32, head_number: u32) -> ParaStoredHeaderData { ParaStoredHeaderData( (head_number as u64, RegularParachainHasher::hash(&(parachain, head_number).encode())) .encode(), ) } fn big_head_data(parachain: u32, head_number: u32) -> ParaHead { ParaHead( BigParachainHeader::new( head_number as _, Default::default(), RegularParachainHasher::hash(&(parachain, head_number).encode()), Default::default(), Default::default(), ) .encode(), ) } fn big_stored_head_data(parachain: u32, head_number: u32) -> ParaStoredHeaderData { ParaStoredHeaderData( (head_number as u128, RegularParachainHasher::hash(&(parachain, head_number).encode())) .encode(), ) } fn head_hash(parachain: u32, head_number: u32) -> ParaHash { head_data(parachain, head_number).hash() } fn import_parachain_1_head( relay_chain_block: RelayBlockNumber, relay_state_root: RelayBlockHash, parachains: Vec<(ParaId, ParaHash)>, proof: ParaHeadsProof, ) -> DispatchResultWithPostInfo { Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (relay_chain_block, test_relay_header(relay_chain_block, relay_state_root).hash()), parachains, proof, ) } fn weight_of_import_parachain_1_head(proof: &ParaHeadsProof, prune_expected: bool) -> Weight { let db_weight = ::DbWeight::get(); WeightInfoOf::::submit_parachain_heads_weight(db_weight, proof, 1) .saturating_sub(if prune_expected { Weight::zero() } else { WeightInfoOf::::parachain_head_pruning_weight(db_weight) }) } #[test] fn submit_parachain_heads_checks_operating_mode() { let (state_root, proof, parachains) = prepare_parachain_heads_proof(vec![(1, head_data(1, 0))]); run_test(|| { initialize(state_root); // `submit_parachain_heads()` should fail when the pallet is halted. PalletOperatingMode::::put(BasicOperatingMode::Halted); assert_noop!( Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (0, test_relay_header(0, state_root).hash()), parachains.clone(), proof.clone(), ), Error::::BridgeModule(OwnedBridgeModuleError::Halted) ); // `submit_parachain_heads()` should succeed now that the pallet is resumed. 
PalletOperatingMode::::put(BasicOperatingMode::Normal); assert_ok!(Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (0, test_relay_header(0, state_root).hash()), parachains, proof, ),); }); } #[test] fn imports_initial_parachain_heads() { let (state_root, proof, parachains) = prepare_parachain_heads_proof(vec![(1, head_data(1, 0)), (3, head_data(3, 10))]); run_test(|| { initialize(state_root); // we're trying to update heads of parachains 1, 2 and 3 let expected_weight = WeightInfo::submit_parachain_heads_weight(DbWeight::get(), &proof, 2); let result = Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (0, test_relay_header(0, state_root).hash()), parachains, proof, ); assert_ok!(result); assert_eq!(result.expect("checked above").actual_weight, Some(expected_weight)); // but only 1 and 2 are updated, because proof is missing head of parachain#2 assert_eq!(ParasInfo::::get(ParaId(1)), Some(initial_best_head(1))); assert_eq!(ParasInfo::::get(ParaId(2)), None); assert_eq!( ParasInfo::::get(ParaId(3)), Some(ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: 0, head_hash: head_data(3, 10).hash() }, next_imported_hash_position: 1, }) ); assert_eq!( ImportedParaHeads::::get( ParaId(1), initial_best_head(1).best_head_hash.head_hash ) .map(|h| h.into_inner()), Some(stored_head_data(1, 0)) ); assert_eq!( ImportedParaHeads::::get( ParaId(2), initial_best_head(2).best_head_hash.head_hash ) .map(|h| h.into_inner()), None ); assert_eq!( ImportedParaHeads::::get(ParaId(3), head_hash(3, 10)) .map(|h| h.into_inner()), Some(stored_head_data(3, 10)) ); assert_eq!( System::::events(), vec![ EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: initial_best_head(1).best_head_hash.head_hash, }), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(3), parachain_head_hash: head_data(3, 10).hash(), }), topics: vec![], } ], ); }); } #[test] fn imports_parachain_heads_is_able_to_progress() { let (state_root_5, proof_5, parachains_5) = prepare_parachain_heads_proof(vec![(1, head_data(1, 5))]); let (state_root_10, proof_10, parachains_10) = prepare_parachain_heads_proof(vec![(1, head_data(1, 10))]); run_test(|| { // start with relay block #0 and import head#5 of parachain#1 initialize(state_root_5); assert_ok!(import_parachain_1_head(0, state_root_5, parachains_5, proof_5)); assert_eq!( ParasInfo::::get(ParaId(1)), Some(ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: 0, head_hash: head_data(1, 5).hash() }, next_imported_hash_position: 1, }) ); assert_eq!( ImportedParaHeads::::get(ParaId(1), head_data(1, 5).hash()) .map(|h| h.into_inner()), Some(stored_head_data(1, 5)) ); assert_eq!( ImportedParaHeads::::get(ParaId(1), head_data(1, 10).hash()) .map(|h| h.into_inner()), None ); assert_eq!( System::::events(), vec![EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: head_data(1, 5).hash(), }), topics: vec![], }], ); // import head#10 of parachain#1 at relay block #1 let relay_1_hash = proceed(1, state_root_10); assert_ok!(import_parachain_1_head(1, state_root_10, parachains_10, proof_10)); assert_eq!( ParasInfo::::get(ParaId(1)), Some(ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: 1, head_hash: head_data(1, 10).hash() }, next_imported_hash_position: 2, }) ); assert_eq!( 
ImportedParaHeads::::get(ParaId(1), head_data(1, 5).hash()) .map(|h| h.into_inner()), Some(stored_head_data(1, 5)) ); assert_eq!( ImportedParaHeads::::get(ParaId(1), head_data(1, 10).hash()) .map(|h| h.into_inner()), Some(stored_head_data(1, 10)) ); assert_eq!( System::::events(), vec![ EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: head_data(1, 5).hash(), }), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: TestEvent::Grandpa1( pallet_bridge_grandpa::Event::UpdatedBestFinalizedHeader { number: 1, hash: relay_1_hash, } ), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: head_data(1, 10).hash(), }), topics: vec![], } ], ); }); } #[test] fn ignores_untracked_parachain() { let (state_root, proof, parachains) = prepare_parachain_heads_proof(vec![ (1, head_data(1, 5)), (UNTRACKED_PARACHAIN_ID, head_data(1, 5)), (2, head_data(1, 5)), ]); run_test(|| { // start with relay block #0 and try to import head#5 of parachain#1 and untracked // parachain let expected_weight = WeightInfo::submit_parachain_heads_weight(DbWeight::get(), &proof, 3) .saturating_sub(WeightInfo::parachain_head_storage_write_weight( DbWeight::get(), )); initialize(state_root); let result = Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (0, test_relay_header(0, state_root).hash()), parachains, proof, ); assert_ok!(result); assert_eq!(result.expect("checked above").actual_weight, Some(expected_weight)); assert_eq!( ParasInfo::::get(ParaId(1)), Some(ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: 0, head_hash: head_data(1, 5).hash() }, next_imported_hash_position: 1, }) ); assert_eq!(ParasInfo::::get(ParaId(UNTRACKED_PARACHAIN_ID)), None,); assert_eq!( ParasInfo::::get(ParaId(2)), Some(ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: 0, head_hash: head_data(1, 5).hash() }, next_imported_hash_position: 1, }) ); assert_eq!( System::::events(), vec![ EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: head_data(1, 5).hash(), }), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UntrackedParachainRejected { parachain: ParaId(UNTRACKED_PARACHAIN_ID), }), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(2), parachain_head_hash: head_data(1, 5).hash(), }), topics: vec![], } ], ); }); } #[test] fn does_nothing_when_already_imported_this_head_at_previous_relay_header() { let (state_root, proof, parachains) = prepare_parachain_heads_proof(vec![(1, head_data(1, 0))]); run_test(|| { // import head#0 of parachain#1 at relay block#0 initialize(state_root); assert_ok!(import_parachain_1_head(0, state_root, parachains.clone(), proof.clone())); assert_eq!(ParasInfo::::get(ParaId(1)), Some(initial_best_head(1))); assert_eq!( System::::events(), vec![EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: initial_best_head(1).best_head_hash.head_hash, }), topics: vec![], }], ); // try to import head#0 of parachain#1 at relay block#1 // => call succeeds, but nothing is changed let relay_1_hash = proceed(1, state_root); 
assert_ok!(import_parachain_1_head(1, state_root, parachains, proof)); assert_eq!(ParasInfo::::get(ParaId(1)), Some(initial_best_head(1))); assert_eq!( System::::events(), vec![ EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: initial_best_head(1).best_head_hash.head_hash, }), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: TestEvent::Grandpa1( pallet_bridge_grandpa::Event::UpdatedBestFinalizedHeader { number: 1, hash: relay_1_hash, } ), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::RejectedObsoleteParachainHead { parachain: ParaId(1), parachain_head_hash: initial_best_head(1).best_head_hash.head_hash, }), topics: vec![], } ], ); }); } #[test] fn does_nothing_when_already_imported_head_at_better_relay_header() { let (state_root_5, proof_5, parachains_5) = prepare_parachain_heads_proof(vec![(1, head_data(1, 5))]); let (state_root_10, proof_10, parachains_10) = prepare_parachain_heads_proof(vec![(1, head_data(1, 10))]); run_test(|| { // start with relay block #0 initialize(state_root_5); // head#10 of parachain#1 at relay block#1 let relay_1_hash = proceed(1, state_root_10); assert_ok!(import_parachain_1_head(1, state_root_10, parachains_10, proof_10)); assert_eq!( ParasInfo::::get(ParaId(1)), Some(ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: 1, head_hash: head_data(1, 10).hash() }, next_imported_hash_position: 1, }) ); assert_eq!( System::::events(), vec![ EventRecord { phase: Phase::Initialization, event: TestEvent::Grandpa1( pallet_bridge_grandpa::Event::UpdatedBestFinalizedHeader { number: 1, hash: relay_1_hash, } ), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: head_data(1, 10).hash(), }), topics: vec![], } ], ); // now try to import head#5 at relay block#0 // => nothing is changed, because better head has already been imported assert_ok!(import_parachain_1_head(0, state_root_5, parachains_5, proof_5)); assert_eq!( ParasInfo::::get(ParaId(1)), Some(ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: 1, head_hash: head_data(1, 10).hash() }, next_imported_hash_position: 1, }) ); assert_eq!( System::::events(), vec![ EventRecord { phase: Phase::Initialization, event: TestEvent::Grandpa1( pallet_bridge_grandpa::Event::UpdatedBestFinalizedHeader { number: 1, hash: relay_1_hash, } ), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: head_data(1, 10).hash(), }), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::RejectedObsoleteParachainHead { parachain: ParaId(1), parachain_head_hash: head_data(1, 5).hash(), }), topics: vec![], } ], ); }); } #[test] fn does_nothing_when_parachain_head_is_too_large() { let (state_root, proof, parachains) = prepare_parachain_heads_proof(vec![(1, head_data(1, 5)), (4, big_head_data(1, 5))]); run_test(|| { // start with relay block #0 and try to import head#5 of parachain#1 and big parachain initialize(state_root); let result = Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (0, test_relay_header(0, state_root).hash()), parachains, proof, ); assert_ok!(result); assert_eq!( ParasInfo::::get(ParaId(1)), Some(ParaInfo { best_head_hash: BestParaHeadHash { 
at_relay_block_number: 0, head_hash: head_data(1, 5).hash() }, next_imported_hash_position: 1, }) ); assert_eq!(ParasInfo::::get(ParaId(4)), None); assert_eq!( System::::events(), vec![ EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: head_data(1, 5).hash(), }), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::RejectedLargeParachainHead { parachain: ParaId(4), parachain_head_hash: big_head_data(1, 5).hash(), parachain_head_size: big_stored_head_data(1, 5).encoded_size() as u32, }), topics: vec![], }, ], ); }); } #[test] fn prunes_old_heads() { run_test(|| { let heads_to_keep = crate::mock::HeadsToKeep::get(); // import exactly `HeadsToKeep` headers for i in 0..heads_to_keep { let (state_root, proof, parachains) = prepare_parachain_heads_proof(vec![(1, head_data(1, i))]); if i == 0 { initialize(state_root); } else { proceed(i, state_root); } let expected_weight = weight_of_import_parachain_1_head(&proof, false); let result = import_parachain_1_head(i, state_root, parachains, proof); assert_ok!(result); assert_eq!(result.expect("checked above").actual_weight, Some(expected_weight)); } // nothing is pruned yet for i in 0..heads_to_keep { assert!(ImportedParaHeads::::get(ParaId(1), head_data(1, i).hash()) .is_some()); } // import next relay chain header and next parachain head let (state_root, proof, parachains) = prepare_parachain_heads_proof(vec![(1, head_data(1, heads_to_keep))]); proceed(heads_to_keep, state_root); let expected_weight = weight_of_import_parachain_1_head(&proof, true); let result = import_parachain_1_head(heads_to_keep, state_root, parachains, proof); assert_ok!(result); assert_eq!(result.expect("checked above").actual_weight, Some(expected_weight)); // and the head#0 is pruned assert!( ImportedParaHeads::::get(ParaId(1), head_data(1, 0).hash()).is_none() ); for i in 1..=heads_to_keep { assert!(ImportedParaHeads::::get(ParaId(1), head_data(1, i).hash()) .is_some()); } }); } #[test] fn fails_on_unknown_relay_chain_block() { let (state_root, proof, parachains) = prepare_parachain_heads_proof(vec![(1, head_data(1, 5))]); run_test(|| { // start with relay block #0 initialize(state_root); // try to import head#5 of parachain#1 at unknown relay chain block #1 assert_noop!( import_parachain_1_head(1, state_root, parachains, proof), Error::::UnknownRelayChainBlock ); }); } #[test] fn fails_on_invalid_storage_proof() { let (_state_root, proof, parachains) = prepare_parachain_heads_proof(vec![(1, head_data(1, 5))]); run_test(|| { // start with relay block #0 initialize(Default::default()); // try to import head#5 of parachain#1 at relay chain block #0 assert_noop!( import_parachain_1_head(0, Default::default(), parachains, proof), Error::::HeaderChain(HeaderChainError::StorageProof( StorageProofError::StorageRootMismatch )) ); }); } #[test] fn is_not_rewriting_existing_head_if_failed_to_read_updated_head() { let (state_root_5, proof_5, parachains_5) = prepare_parachain_heads_proof(vec![(1, head_data(1, 5))]); let (state_root_10_at_20, proof_10_at_20, parachains_10_at_20) = prepare_parachain_heads_proof(vec![(2, head_data(2, 10))]); let (state_root_10_at_30, proof_10_at_30, parachains_10_at_30) = prepare_parachain_heads_proof(vec![(1, head_data(1, 10))]); run_test(|| { // we've already imported head#5 of parachain#1 at relay block#10 initialize(state_root_5); import_parachain_1_head(0, state_root_5, parachains_5, proof_5).expect("ok"); 
assert_eq!( Pallet::::best_parachain_head(ParaId(1)), Some(stored_head_data(1, 5)) ); // then if someone is pretending to provide updated head#10 of parachain#1 at relay // block#20, but fails to do that // // => we'll leave previous value proceed(20, state_root_10_at_20); assert_ok!(Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (20, test_relay_header(20, state_root_10_at_20).hash()), parachains_10_at_20, proof_10_at_20, ),); assert_eq!( Pallet::::best_parachain_head(ParaId(1)), Some(stored_head_data(1, 5)) ); // then if someone is pretending to provide updated head#10 of parachain#1 at relay // block#30, and actualy provides it // // => we'll update value proceed(30, state_root_10_at_30); assert_ok!(Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (30, test_relay_header(30, state_root_10_at_30).hash()), parachains_10_at_30, proof_10_at_30, ),); assert_eq!( Pallet::::best_parachain_head(ParaId(1)), Some(stored_head_data(1, 10)) ); }); } #[test] fn storage_keys_computed_properly() { assert_eq!( ParasInfo::::storage_map_final_key(ParaId(42)).to_vec(), ParasInfoKeyProvider::final_key("Parachains", &ParaId(42)).0 ); assert_eq!( ImportedParaHeads::::storage_double_map_final_key( ParaId(42), ParaHash::from([21u8; 32]) ) .to_vec(), ImportedParaHeadsKeyProvider::final_key( "Parachains", &ParaId(42), &ParaHash::from([21u8; 32]) ) .0, ); } #[test] fn ignores_parachain_head_if_it_is_missing_from_storage_proof() { let (state_root, proof, _) = prepare_parachain_heads_proof(vec![]); let parachains = vec![(ParaId(2), Default::default())]; run_test(|| { initialize(state_root); assert_ok!(Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (0, test_relay_header(0, state_root).hash()), parachains, proof, )); assert_eq!( System::::events(), vec![EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::MissingParachainHead { parachain: ParaId(2), }), topics: vec![], }], ); }); } #[test] fn ignores_parachain_head_if_parachain_head_hash_is_wrong() { let (state_root, proof, _) = prepare_parachain_heads_proof(vec![(1, head_data(1, 0))]); let parachains = vec![(ParaId(1), head_data(1, 10).hash())]; run_test(|| { initialize(state_root); assert_ok!(Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (0, test_relay_header(0, state_root).hash()), parachains, proof, )); assert_eq!( System::::events(), vec![EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::IncorrectParachainHeadHash { parachain: ParaId(1), parachain_head_hash: head_data(1, 10).hash(), actual_parachain_head_hash: head_data(1, 0).hash(), }), topics: vec![], }], ); }); } #[test] fn test_bridge_parachain_call_is_correctly_defined() { let (state_root, proof, _) = prepare_parachain_heads_proof(vec![(1, head_data(1, 0))]); let parachains = vec![(ParaId(2), Default::default())]; let relay_header_id = (0, test_relay_header(0, state_root).hash()); let direct_submit_parachain_heads_call = Call::::submit_parachain_heads { at_relay_block: relay_header_id, parachains: parachains.clone(), parachain_heads_proof: proof.clone(), }; let indirect_submit_parachain_heads_call = BridgeParachainCall::submit_parachain_heads { at_relay_block: relay_header_id, parachains, parachain_heads_proof: proof, }; assert_eq!( direct_submit_parachain_heads_call.encode(), indirect_submit_parachain_heads_call.encode() ); } generate_owned_bridge_module_tests!(BasicOperatingMode::Normal, BasicOperatingMode::Halted); #[test] fn maybe_max_parachains_returns_correct_value() { 
assert_eq!(MaybeMaxParachains::::get(), Some(mock::TOTAL_PARACHAINS)); } #[test] fn maybe_max_total_parachain_hashes_returns_correct_value() { assert_eq!( MaybeMaxTotalParachainHashes::::get(), Some(mock::TOTAL_PARACHAINS * mock::HeadsToKeep::get()), ); } }