// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.

// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.

//! Parachains finality module.
//!
//! This module needs to be deployed with the GRANDPA pallet, which is syncing relay
//! chain blocks. The main entry point of this module is `submit_parachain_heads`, which
//! accepts a storage proof of some parachain `Heads` entries from the bridged relay chain.
//! It requires the corresponding relay headers to be already synced.

#![cfg_attr(not(feature = "std"), no_std)]

pub use weights::WeightInfo;
pub use weights_ext::WeightInfoExt;

use bp_parachains::{parachain_head_storage_key_at_source, ParaInfo};
use bp_polkadot_core::parachains::{ParaHash, ParaHasher, ParaHead, ParaHeadsProof, ParaId};
use bp_runtime::StorageProofError;
use frame_support::{dispatch::PostDispatchInfo, traits::Contains};
use sp_runtime::traits::Header as HeaderT;
use sp_std::vec::Vec;

// Re-export in crate namespace for `construct_runtime!`.
pub use pallet::*;

pub mod weights;
pub mod weights_ext;

#[cfg(feature = "runtime-benchmarks")]
pub mod benchmarking;

mod extension;
#[cfg(test)]
mod mock;

/// The target that will be used when publishing logs related to this pallet.
pub const LOG_TARGET: &str = "runtime::bridge-parachains";

/// Block hash of the bridged relay chain.
pub type RelayBlockHash = bp_polkadot_core::Hash;
/// Block number of the bridged relay chain.
pub type RelayBlockNumber = bp_polkadot_core::BlockNumber;
/// Hasher of the bridged relay chain.
pub type RelayBlockHasher = bp_polkadot_core::Hasher;

/// Artifacts of the parachain head update.
struct UpdateParachainHeadArtifacts {
	/// New best head of the parachain.
	pub best_head: ParaInfo,
	/// If `true`, some old parachain head has been pruned during the update.
	pub prune_happened: bool,
}

#[frame_support::pallet]
pub mod pallet {
	use super::*;
	use bp_parachains::{BestParaHeadHash, ImportedParaHeadsKeyProvider, ParasInfoKeyProvider};
	use bp_runtime::{
		BasicOperatingMode, BoundedStorageValue, OwnedBridgeModule, StorageDoubleMapKeyProvider,
		StorageMapKeyProvider,
	};
	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;

	/// Stored parachain head of the given parachains pallet.
	pub type StoredParaHeadOf<T, I> =
		BoundedStorageValue<<T as Config<I>>::MaxParaHeadSize, ParaHead>;
	/// Weight info of the given parachains pallet.
	pub type WeightInfoOf<T, I> = <T as Config<I>>::WeightInfo;

	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config<I>, I: 'static = ()> {
		/// The caller has provided the head of a parachain that the pallet is not configured to
		/// track.
		UntrackedParachainRejected { parachain: ParaId },
		/// The caller has declared that they have provided the given parachain head, but it is
		/// missing from the storage proof.
		MissingParachainHead { parachain: ParaId },
		/// The caller has provided a parachain head hash that does not match the hash read from
		/// the storage proof.
		IncorrectParachainHeadHash {
			parachain: ParaId,
			parachain_head_hash: ParaHash,
			actual_parachain_head_hash: ParaHash,
		},
		/// The caller has provided an obsolete parachain head, which is already known to the
		/// pallet.
		RejectedObsoleteParachainHead { parachain: ParaId, parachain_head_hash: ParaHash },
		/// The caller has provided a parachain head that exceeds the maximal configured head
		/// size.
		RejectedLargeParachainHead {
			parachain: ParaId,
			parachain_head_hash: ParaHash,
			parachain_head_size: u32,
		},
		/// Parachain head has been updated.
		UpdatedParachainHead { parachain: ParaId, parachain_head_hash: ParaHash },
	}

	#[pallet::error]
	pub enum Error<T, I = ()> {
		/// Relay chain block hash is unknown to us.
		UnknownRelayChainBlock,
		/// The number of the stored relay block differs from the one provided by the relayer.
		InvalidRelayChainBlockNumber,
		/// Invalid storage proof has been passed.
		InvalidStorageProof,
		/// Given parachain head is unknown.
		UnknownParaHead,
		/// The storage proof doesn't contain the storage root, so it is invalid for the given
		/// header.
		StorageRootMismatch,
		/// Failed to extract state root from the given parachain head.
		FailedToExtractStateRoot,
		/// Error generated by the `OwnedBridgeModule` trait.
		BridgeModule(bp_runtime::OwnedBridgeModuleError),
	}

	#[pallet::config]
	#[pallet::disable_frame_system_supertrait_check]
	pub trait Config<I: 'static = ()>:
		pallet_bridge_grandpa::Config<Self::BridgesGrandpaPalletInstance>
	{
		/// The overarching event type.
		type RuntimeEvent: From<Event<Self, I>>
			+ IsType<<Self as frame_system::Config>::RuntimeEvent>;
		/// Benchmark results for the runtime this pallet is plugged into.
		type WeightInfo: WeightInfoExt;

		/// Instance of the bridges GRANDPA pallet (within this runtime) that this pallet is
		/// linked to.
		///
		/// The GRANDPA pallet instance must be configured to import headers of the relay chain
		/// that we're interested in.
		type BridgesGrandpaPalletInstance: 'static;

		/// Name of the original `paras` pallet in the `construct_runtime!()` call at the bridged
		/// chain.
		///
		/// Please keep in mind that this should be the name of the `runtime_parachains::paras`
		/// pallet from the Polkadot repository, not the name of `pallet-bridge-parachains`.
		#[pallet::constant]
		type ParasPalletName: Get<&'static str>;

		/// Set of parachains that are tracked by this pallet.
		///
		/// The set may be extended easily, without requiring any runtime upgrades. Removing a
		/// tracked parachain requires special handling - pruning existing heads and cleaning
		/// related data structures.
		type TrackedParachains: Contains<ParaId>;

		/// Maximal number of single parachain heads to keep in the storage.
		///
		/// The setting is there to prevent growing the on-chain state indefinitely. Note that
		/// the setting does not relate to parachain block numbers - we will simply keep this
		/// many of the most recently imported items in the storage, so it doesn't guarantee any
		/// fixed timeframe for heads.
		///
		/// An incautious change of this constant may lead to orphan entries in the runtime
		/// storage.
		#[pallet::constant]
		type HeadsToKeep: Get<u32>;

		/// Maximal size (in bytes) of the SCALE-encoded parachain head.
		///
		/// Keep in mind that the size of any tracked parachain header must not exceed this
		/// value. So if you're going to track multiple parachains, one of which is storing
		/// large digests in its headers, choose this value to accommodate the largest of them.
		///
		/// There are no mandatory headers in this pallet, so it can't stall if some header
		/// exceeds this bound.
		#[pallet::constant]
		type MaxParaHeadSize: Get<u32>;
	}
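	// A minimal sketch of a runtime-side `Config` implementation (illustrative only; the
	// `Runtime`, `RuntimeEvent`, `WithRialtoGrandpaInstance`, `WithRialtoParachainsInstance` and
	// `BridgeWeight` names below are assumptions, not part of this pallet):
	//
	// ```ignore
	// use frame_support::{parameter_types, traits::{ConstU32, Contains}};
	//
	// parameter_types! {
	// 	// name of the `paras` pallet in the bridged relay chain's `construct_runtime!()`
	// 	pub const ParasPalletName: &'static str = "Paras";
	// }
	//
	// // track a single parachain with id 2000
	// pub struct OnlyParachain2000;
	// impl Contains<ParaId> for OnlyParachain2000 {
	// 	fn contains(para_id: &ParaId) -> bool {
	// 		*para_id == ParaId(2000)
	// 	}
	// }
	//
	// impl pallet_bridge_parachains::Config<WithRialtoParachainsInstance> for Runtime {
	// 	type RuntimeEvent = RuntimeEvent;
	// 	type WeightInfo = BridgeWeight<Runtime>;
	// 	// GRANDPA pallet instance that imports headers of the bridged relay chain
	// 	type BridgesGrandpaPalletInstance = WithRialtoGrandpaInstance;
	// 	type ParasPalletName = ParasPalletName;
	// 	type TrackedParachains = OnlyParachain2000;
	// 	// keep up to 1024 most recent heads per parachain
	// 	type HeadsToKeep = ConstU32<1024>;
	// 	// reject SCALE-encoded heads larger than 1024 bytes
	// 	type MaxParaHeadSize = ConstU32<1024>;
	// }
	// ```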
	/// Optional pallet owner.
	///
	/// The pallet owner has a right to halt all pallet operations and then resume them. If it is
	/// `None`, then there are no direct ways to halt/resume pallet operations, but other runtime
	/// methods may still be used to do that (e.g. a democracy referendum to update the halt flag
	/// directly or to call `set_operating_mode`).
	#[pallet::storage]
	pub type PalletOwner<T: Config<I>, I: 'static = ()> =
		StorageValue<_, T::AccountId, OptionQuery>;

	/// The current operating mode of the pallet.
	///
	/// Depending on the mode, either all or no transactions will be allowed.
	#[pallet::storage]
	pub type PalletOperatingMode<T: Config<I>, I: 'static = ()> =
		StorageValue<_, BasicOperatingMode, ValueQuery>;

	/// Parachains info.
	///
	/// Contains the following info:
	/// - best parachain head hash
	/// - the head of the `ImportedParaHashes` ring buffer
	#[pallet::storage]
	pub type ParasInfo<T: Config<I>, I: 'static = ()> = StorageMap<
		_,
		<ParasInfoKeyProvider as StorageMapKeyProvider>::Hasher,
		<ParasInfoKeyProvider as StorageMapKeyProvider>::Key,
		<ParasInfoKeyProvider as StorageMapKeyProvider>::Value,
	>;

	/// Parachain heads which have been imported into the pallet.
	#[pallet::storage]
	pub type ImportedParaHeads<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
		_,
		<ImportedParaHeadsKeyProvider as StorageDoubleMapKeyProvider>::Hasher1,
		<ImportedParaHeadsKeyProvider as StorageDoubleMapKeyProvider>::Key1,
		<ImportedParaHeadsKeyProvider as StorageDoubleMapKeyProvider>::Hasher2,
		<ImportedParaHeadsKeyProvider as StorageDoubleMapKeyProvider>::Key2,
		StoredParaHeadOf<T, I>,
	>;

	/// A ring buffer of imported parachain head hashes, ordered by insertion time.
	#[pallet::storage]
	pub(super) type ImportedParaHashes<T: Config<I>, I: 'static = ()> =
		StorageDoubleMap<_, Blake2_128Concat, ParaId, Twox64Concat, u32, ParaHash>;

	#[pallet::pallet]
	#[pallet::generate_store(pub(super) trait Store)]
	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);

	impl<T: Config<I>, I: 'static> OwnedBridgeModule<T> for Pallet<T, I> {
		const LOG_TARGET: &'static str = LOG_TARGET;
		type OwnerStorage = PalletOwner<T, I>;
		type OperatingMode = BasicOperatingMode;
		type OperatingModeStorage = PalletOperatingMode<T, I>;
	}

	#[pallet::call]
	impl<T: Config<I>, I: 'static> Pallet<T, I>
	where
		<T as pallet_bridge_grandpa::Config<T::BridgesGrandpaPalletInstance>>::BridgedChain:
			bp_runtime::Chain<
				BlockNumber = RelayBlockNumber,
				Hash = RelayBlockHash,
				Hasher = RelayBlockHasher,
			>,
	{
		/// Submit proof of one or several parachain heads.
		///
		/// The proof is supposed to be a proof of some `Heads` entries from the
		/// `polkadot-runtime-parachains::paras` pallet instance, deployed at the bridged chain.
		/// The proof is supposed to be crafted at the `relay_header_hash` that must already be
		/// imported by the corresponding GRANDPA pallet at this chain.
		#[pallet::weight(WeightInfoOf::<T, I>::submit_parachain_heads_weight(
			T::DbWeight::get(),
			parachain_heads_proof,
			parachains.len() as _,
		))]
		pub fn submit_parachain_heads(
			_origin: OriginFor<T>,
			at_relay_block: (RelayBlockNumber, RelayBlockHash),
			parachains: Vec<(ParaId, ParaHash)>,
			parachain_heads_proof: ParaHeadsProof,
		) -> DispatchResultWithPostInfo {
			Self::ensure_not_halted().map_err(Error::<T, I>::BridgeModule)?;

			// we'll need the relay chain header to verify that parachain heads are always
			// increasing.
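			// The overall flow below is:
			// 1) look up the finalized relay chain header, previously imported by the bridged
			//    GRANDPA pallet, using the provided hash and check that its number matches the
			//    provided number;
			// 2) verify `parachain_heads_proof` against the storage root of that header and
			//    read the `Heads` entry of every parachain from `parachains`;
			// 3) for every tracked parachain, accept the head only if it is newer than the
			//    stored best head, refunding the unused weight otherwise.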
let (relay_block_number, relay_block_hash) = at_relay_block; let relay_block = pallet_bridge_grandpa::ImportedHeaders::< T, T::BridgesGrandpaPalletInstance, >::get(relay_block_hash) .ok_or(Error::::UnknownRelayChainBlock)?; ensure!( *relay_block.number() == relay_block_number, Error::::InvalidRelayChainBlockNumber, ); // now parse storage proof and read parachain heads let mut actual_weight = WeightInfoOf::::submit_parachain_heads_weight( T::DbWeight::get(), ¶chain_heads_proof, parachains.len() as _, ); pallet_bridge_grandpa::Pallet::::parse_finalized_storage_proof( relay_block_hash, sp_trie::StorageProof::new(parachain_heads_proof.0), move |storage| { for (parachain, parachain_head_hash) in parachains { // if we're not tracking this parachain, we'll just ignore its head proof here if !T::TrackedParachains::contains(¶chain) { log::trace!( target: LOG_TARGET, "The head of parachain {:?} has been provided, but it is not tracked by the pallet", parachain, ); Self::deposit_event(Event::UntrackedParachainRejected { parachain }); continue; } let parachain_head = match Pallet::::read_parachain_head(&storage, parachain) { Ok(Some(parachain_head)) => parachain_head, Ok(None) => { log::trace!( target: LOG_TARGET, "The head of parachain {:?} is None. {}", parachain, if ParasInfo::::contains_key(parachain) { "Looks like it is not yet registered at the source relay chain" } else { "Looks like it has been deregistered from the source relay chain" }, ); Self::deposit_event(Event::MissingParachainHead { parachain }); continue; }, Err(e) => { log::trace!( target: LOG_TARGET, "The read of head of parachain {:?} has failed: {:?}", parachain, e, ); Self::deposit_event(Event::MissingParachainHead { parachain }); continue; }, }; // if relayer has specified invalid parachain head hash, ignore the head // (this isn't strictly necessary, but better safe than sorry) let actual_parachain_head_hash = parachain_head.hash(); if parachain_head_hash != actual_parachain_head_hash { log::trace!( target: LOG_TARGET, "The submitter has specified invalid parachain {:?} head hash: {:?} vs {:?}", parachain, parachain_head_hash, actual_parachain_head_hash, ); Self::deposit_event(Event::IncorrectParachainHeadHash { parachain, parachain_head_hash, actual_parachain_head_hash, }); continue; } let update_result: Result<_, ()> = ParasInfo::::try_mutate(parachain, |stored_best_head| { let artifacts = Pallet::::update_parachain_head( parachain, stored_best_head.take(), relay_block_number, parachain_head, parachain_head_hash, )?; *stored_best_head = Some(artifacts.best_head); Ok(artifacts.prune_happened) }); // we're refunding weight if update has not happened and if pruning has not happened let is_update_happened = matches!(update_result, Ok(_)); if !is_update_happened { actual_weight = actual_weight .saturating_sub(WeightInfoOf::::parachain_head_storage_write_weight(T::DbWeight::get())); } let is_prune_happened = matches!(update_result, Ok(true)); if !is_prune_happened { actual_weight = actual_weight .saturating_sub(WeightInfoOf::::parachain_head_pruning_weight(T::DbWeight::get())); } } }, ) .map_err(|_| Error::::InvalidStorageProof)?; Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes }) } /// Change `PalletOwner`. /// /// May only be called either by root, or by `PalletOwner`. 
#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] pub fn set_owner(origin: OriginFor, new_owner: Option) -> DispatchResult { >::set_owner(origin, new_owner) } /// Halt or resume all pallet operations. /// /// May only be called either by root, or by `PalletOwner`. #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] pub fn set_operating_mode( origin: OriginFor, operating_mode: BasicOperatingMode, ) -> DispatchResult { >::set_operating_mode(origin, operating_mode) } } impl, I: 'static> Pallet { /// Get best finalized header of the given parachain. pub fn best_parachain_head(parachain: ParaId) -> Option { let best_para_head_hash = ParasInfo::::get(parachain)?.best_head_hash.head_hash; ImportedParaHeads::::get(parachain, best_para_head_hash).map(|h| h.into_inner()) } /// Get parachain head with given hash. pub fn parachain_head(parachain: ParaId, hash: ParaHash) -> Option { ImportedParaHeads::::get(parachain, hash).map(|h| h.into_inner()) } /// Verify that the passed storage proof is valid, given it is crafted using /// known finalized header. If the proof is valid, then the `parse` callback /// is called and the function returns its result. pub fn parse_finalized_storage_proof( parachain: ParaId, hash: ParaHash, storage_proof: sp_trie::StorageProof, decode_state_root: impl FnOnce(ParaHead) -> Option, parse: impl FnOnce(bp_runtime::StorageProofChecker) -> R, ) -> Result { let para_head = Self::parachain_head(parachain, hash).ok_or(Error::::UnknownParaHead)?; let state_root = decode_state_root(para_head).ok_or(Error::::FailedToExtractStateRoot)?; let storage_proof_checker = bp_runtime::StorageProofChecker::new(state_root, storage_proof) .map_err(|_| Error::::StorageRootMismatch)?; Ok(parse(storage_proof_checker)) } /// Read parachain head from storage proof. fn read_parachain_head( storage: &bp_runtime::StorageProofChecker, parachain: ParaId, ) -> Result, StorageProofError> { let parachain_head_key = parachain_head_storage_key_at_source(T::ParasPalletName::get(), parachain); storage.read_and_decode_value(parachain_head_key.0.as_ref()) } /// Check if para head has been already updated at better relay chain block. /// Without this check, we may import heads in random order. /// /// Returns `true` if the pallet is ready to import given parachain head. /// Returns `false` if the pallet already knows the same or better parachain head. #[must_use] pub fn validate_updated_parachain_head( parachain: ParaId, maybe_stored_best_head: &Option, updated_at_relay_block_number: RelayBlockNumber, updated_head_hash: ParaHash, err_log_prefix: &str, ) -> bool { let stored_best_head = match maybe_stored_best_head { Some(stored_best_head) => stored_best_head, None => return true, }; if stored_best_head.best_head_hash.at_relay_block_number >= updated_at_relay_block_number { log::trace!( target: LOG_TARGET, "{}. The parachain head for {:?} was already updated at better relay chain block {} >= {}.", err_log_prefix, parachain, stored_best_head.best_head_hash.at_relay_block_number, updated_at_relay_block_number ); return false } if stored_best_head.best_head_hash.head_hash == updated_head_hash { log::trace!( target: LOG_TARGET, "{}. The parachain head hash for {:?} was already updated to {} at block {} < {}.", err_log_prefix, parachain, updated_head_hash, stored_best_head.best_head_hash.at_relay_block_number, updated_at_relay_block_number ); return false } true } /// Try to update parachain head. 
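		// Implementation notes (what the function below does):
		// - the head is rejected if the stored best head was updated at the same or a better
		//   (higher) relay block, or if the very same head hash is already known
		//   (`RejectedObsoleteParachainHead`);
		// - the head is rejected if its SCALE-encoded size exceeds `MaxParaHeadSize`
		//   (`RejectedLargeParachainHead`);
		// - otherwise the head is inserted into `ImportedParaHeads` and its hash is recorded in
		//   the `ImportedParaHashes` ring buffer at `next_imported_hash_position`; the head that
		//   previously occupied this slot (if any) is pruned, so at most `HeadsToKeep` heads are
		//   kept per parachain.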
pub(super) fn update_parachain_head( parachain: ParaId, stored_best_head: Option, updated_at_relay_block_number: RelayBlockNumber, updated_head: ParaHead, updated_head_hash: ParaHash, ) -> Result { // check if head has been already updated at better relay chain block. Without this // check, we may import heads in random order let err_log_prefix = "The parachain head can't be updated"; let is_valid = Self::validate_updated_parachain_head( parachain, &stored_best_head, updated_at_relay_block_number, updated_head_hash, err_log_prefix, ); if !is_valid { Self::deposit_event(Event::RejectedObsoleteParachainHead { parachain, parachain_head_hash: updated_head_hash, }); return Err(()) } // verify that the parachain head size is <= `MaxParaHeadSize` let updated_head = match StoredParaHeadOf::::try_from_inner(updated_head) { Ok(updated_head) => updated_head, Err(e) => { log::trace!( target: LOG_TARGET, "{}. The parachain head size for {:?} is {}. It exceeds maximal configured size {}.", err_log_prefix, parachain, e.value_size, e.maximal_size, ); Self::deposit_event(Event::RejectedLargeParachainHead { parachain, parachain_head_hash: updated_head_hash, parachain_head_size: e.value_size as _, }); return Err(()) }, }; let next_imported_hash_position = stored_best_head .map_or(0, |stored_best_head| stored_best_head.next_imported_hash_position); // insert updated best parachain head let head_hash_to_prune = ImportedParaHashes::::try_get(parachain, next_imported_hash_position); let updated_best_para_head = ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: updated_at_relay_block_number, head_hash: updated_head_hash, }, next_imported_hash_position: (next_imported_hash_position + 1) % T::HeadsToKeep::get(), }; ImportedParaHashes::::insert( parachain, next_imported_hash_position, updated_head_hash, ); ImportedParaHeads::::insert(parachain, updated_head_hash, updated_head); log::trace!( target: LOG_TARGET, "Updated head of parachain {:?} to {}", parachain, updated_head_hash, ); // remove old head let prune_happened = head_hash_to_prune.is_ok(); if let Ok(head_hash_to_prune) = head_hash_to_prune { log::trace!( target: LOG_TARGET, "Pruning old head of parachain {:?}: {}", parachain, head_hash_to_prune, ); ImportedParaHeads::::remove(parachain, head_hash_to_prune); } Self::deposit_event(Event::UpdatedParachainHead { parachain, parachain_head_hash: updated_head_hash, }); Ok(UpdateParachainHeadArtifacts { best_head: updated_best_para_head, prune_happened }) } } #[pallet::genesis_config] pub struct GenesisConfig, I: 'static = ()> { /// Initial pallet operating mode. pub operating_mode: BasicOperatingMode, /// Initial pallet owner. pub owner: Option, /// Dummy marker. 
pub phantom: sp_std::marker::PhantomData, } #[cfg(feature = "std")] impl, I: 'static> Default for GenesisConfig { fn default() -> Self { Self { operating_mode: Default::default(), owner: Default::default(), phantom: Default::default(), } } } #[pallet::genesis_build] impl, I: 'static> GenesisBuild for GenesisConfig { fn build(&self) { PalletOperatingMode::::put(self.operating_mode); if let Some(ref owner) = self.owner { PalletOwner::::put(owner); } } } } #[cfg(test)] mod tests { use super::*; use crate::mock::{ run_test, test_relay_header, RuntimeEvent as TestEvent, RuntimeOrigin, TestRuntime, MAXIMAL_PARACHAIN_HEAD_SIZE, PARAS_PALLET_NAME, UNTRACKED_PARACHAIN_ID, }; use codec::Encode; use bp_parachains::{BestParaHeadHash, ImportedParaHeadsKeyProvider, ParasInfoKeyProvider}; use bp_runtime::{ record_all_trie_keys, BasicOperatingMode, OwnedBridgeModuleError, StorageDoubleMapKeyProvider, StorageMapKeyProvider, }; use bp_test_utils::{ authority_list, generate_owned_bridge_module_tests, make_default_justification, }; use frame_support::{ assert_noop, assert_ok, dispatch::DispatchResultWithPostInfo, storage::generator::{StorageDoubleMap, StorageMap}, traits::{Get, OnInitialize}, weights::Weight, }; use frame_system::{EventRecord, Pallet as System, Phase}; use sp_runtime::DispatchError; use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, Recorder, TrieMut}; type BridgesGrandpaPalletInstance = pallet_bridge_grandpa::Instance1; type WeightInfo = ::WeightInfo; type DbWeight = ::DbWeight; fn initialize(state_root: RelayBlockHash) { pallet_bridge_grandpa::Pallet::::initialize( RuntimeOrigin::root(), bp_header_chain::InitializationData { header: Box::new(test_relay_header(0, state_root)), authority_list: authority_list(), set_id: 1, operating_mode: BasicOperatingMode::Normal, }, ) .unwrap(); } fn proceed(num: RelayBlockNumber, state_root: RelayBlockHash) { pallet_bridge_grandpa::Pallet::::on_initialize( 0, ); let header = test_relay_header(num, state_root); let justification = make_default_justification(&header); assert_ok!( pallet_bridge_grandpa::Pallet::::submit_finality_proof( RuntimeOrigin::signed(1), Box::new(header), justification, ) ); } fn prepare_parachain_heads_proof( heads: Vec<(u32, ParaHead)>, ) -> (RelayBlockHash, ParaHeadsProof, Vec<(ParaId, ParaHash)>) { let mut parachains = Vec::with_capacity(heads.len()); let mut root = Default::default(); let mut mdb = MemoryDB::default(); { let mut trie = TrieDBMutBuilderV1::::new(&mut mdb, &mut root).build(); for (parachain, head) in heads { let storage_key = parachain_head_storage_key_at_source(PARAS_PALLET_NAME, ParaId(parachain)); trie.insert(&storage_key.0, &head.encode()) .map_err(|_| "TrieMut::insert has failed") .expect("TrieMut::insert should not fail in tests"); parachains.push((ParaId(parachain), head.hash())); } } // generate storage proof to be delivered to This chain let mut proof_recorder = Recorder::>::new(); record_all_trie_keys::, _>(&mdb, &root, &mut proof_recorder) .map_err(|_| "record_all_trie_keys has failed") .expect("record_all_trie_keys should not fail in benchmarks"); let storage_proof = proof_recorder.drain().into_iter().map(|n| n.data.to_vec()).collect(); (root, ParaHeadsProof(storage_proof), parachains) } fn initial_best_head(parachain: u32) -> ParaInfo { ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: 0, head_hash: head_data(parachain, 0).hash(), }, next_imported_hash_position: 1, } } fn head_data(parachain: u32, head_number: u32) -> ParaHead { ParaHead((parachain, 
head_number).encode()) } fn large_head_data(parachain: u32, head_number: u32) -> ParaHead { ParaHead( (parachain, head_number, vec![42u8; MAXIMAL_PARACHAIN_HEAD_SIZE as usize]).encode(), ) } fn head_hash(parachain: u32, head_number: u32) -> ParaHash { head_data(parachain, head_number).hash() } fn import_parachain_1_head( relay_chain_block: RelayBlockNumber, relay_state_root: RelayBlockHash, parachains: Vec<(ParaId, ParaHash)>, proof: ParaHeadsProof, ) -> DispatchResultWithPostInfo { Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (relay_chain_block, test_relay_header(relay_chain_block, relay_state_root).hash()), parachains, proof, ) } fn weight_of_import_parachain_1_head(proof: &ParaHeadsProof, prune_expected: bool) -> Weight { let db_weight = ::DbWeight::get(); WeightInfoOf::::submit_parachain_heads_weight(db_weight, proof, 1) .saturating_sub(if prune_expected { Weight::zero() } else { WeightInfoOf::::parachain_head_pruning_weight(db_weight) }) } #[test] fn submit_parachain_heads_checks_operating_mode() { let (state_root, proof, parachains) = prepare_parachain_heads_proof(vec![(1, head_data(1, 0))]); run_test(|| { initialize(state_root); // `submit_parachain_heads()` should fail when the pallet is halted. PalletOperatingMode::::put(BasicOperatingMode::Halted); assert_noop!( Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (0, test_relay_header(0, state_root).hash()), parachains.clone(), proof.clone(), ), Error::::BridgeModule(OwnedBridgeModuleError::Halted) ); // `submit_parachain_heads()` should succeed now that the pallet is resumed. PalletOperatingMode::::put(BasicOperatingMode::Normal); assert_ok!(Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (0, test_relay_header(0, state_root).hash()), parachains, proof, ),); }); } #[test] fn imports_initial_parachain_heads() { let (state_root, proof, parachains) = prepare_parachain_heads_proof(vec![(1, head_data(1, 0)), (3, head_data(3, 10))]); run_test(|| { initialize(state_root); // we're trying to update heads of parachains 1, 2 and 3 let expected_weight = WeightInfo::submit_parachain_heads_weight(DbWeight::get(), &proof, 2); let result = Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (0, test_relay_header(0, state_root).hash()), parachains, proof, ); assert_ok!(result); assert_eq!(result.expect("checked above").actual_weight, Some(expected_weight)); // but only 1 and 2 are updated, because proof is missing head of parachain#2 assert_eq!(ParasInfo::::get(ParaId(1)), Some(initial_best_head(1))); assert_eq!(ParasInfo::::get(ParaId(2)), None); assert_eq!( ParasInfo::::get(ParaId(3)), Some(ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: 0, head_hash: head_data(3, 10).hash() }, next_imported_hash_position: 1, }) ); assert_eq!( ImportedParaHeads::::get( ParaId(1), initial_best_head(1).best_head_hash.head_hash ) .map(|h| h.into_inner()), Some(head_data(1, 0)) ); assert_eq!( ImportedParaHeads::::get( ParaId(2), initial_best_head(2).best_head_hash.head_hash ) .map(|h| h.into_inner()), None ); assert_eq!( ImportedParaHeads::::get(ParaId(3), head_hash(3, 10)) .map(|h| h.into_inner()), Some(head_data(3, 10)) ); assert_eq!( System::::events(), vec![ EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: initial_best_head(1).best_head_hash.head_hash, }), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(3), 
parachain_head_hash: head_data(3, 10).hash(), }), topics: vec![], } ], ); }); } #[test] fn imports_parachain_heads_is_able_to_progress() { let (state_root_5, proof_5, parachains_5) = prepare_parachain_heads_proof(vec![(1, head_data(1, 5))]); let (state_root_10, proof_10, parachains_10) = prepare_parachain_heads_proof(vec![(1, head_data(1, 10))]); run_test(|| { // start with relay block #0 and import head#5 of parachain#1 initialize(state_root_5); assert_ok!(import_parachain_1_head(0, state_root_5, parachains_5, proof_5)); assert_eq!( ParasInfo::::get(ParaId(1)), Some(ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: 0, head_hash: head_data(1, 5).hash() }, next_imported_hash_position: 1, }) ); assert_eq!( ImportedParaHeads::::get(ParaId(1), head_data(1, 5).hash()) .map(|h| h.into_inner()), Some(head_data(1, 5)) ); assert_eq!( ImportedParaHeads::::get(ParaId(1), head_data(1, 10).hash()) .map(|h| h.into_inner()), None ); assert_eq!( System::::events(), vec![EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: head_data(1, 5).hash(), }), topics: vec![], }], ); // import head#10 of parachain#1 at relay block #1 proceed(1, state_root_10); assert_ok!(import_parachain_1_head(1, state_root_10, parachains_10, proof_10)); assert_eq!( ParasInfo::::get(ParaId(1)), Some(ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: 1, head_hash: head_data(1, 10).hash() }, next_imported_hash_position: 2, }) ); assert_eq!( ImportedParaHeads::::get(ParaId(1), head_data(1, 5).hash()) .map(|h| h.into_inner()), Some(head_data(1, 5)) ); assert_eq!( ImportedParaHeads::::get(ParaId(1), head_data(1, 10).hash()) .map(|h| h.into_inner()), Some(head_data(1, 10)) ); assert_eq!( System::::events(), vec![ EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: head_data(1, 5).hash(), }), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: head_data(1, 10).hash(), }), topics: vec![], } ], ); }); } #[test] fn ignores_untracked_parachain() { let (state_root, proof, parachains) = prepare_parachain_heads_proof(vec![ (1, head_data(1, 5)), (UNTRACKED_PARACHAIN_ID, head_data(1, 5)), (2, head_data(1, 5)), ]); run_test(|| { // start with relay block #0 and try to import head#5 of parachain#1 and untracked // parachain let expected_weight = WeightInfo::submit_parachain_heads_weight(DbWeight::get(), &proof, 3) .saturating_sub(WeightInfo::parachain_head_storage_write_weight( DbWeight::get(), )); initialize(state_root); let result = Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (0, test_relay_header(0, state_root).hash()), parachains, proof, ); assert_ok!(result); assert_eq!(result.expect("checked above").actual_weight, Some(expected_weight)); assert_eq!( ParasInfo::::get(ParaId(1)), Some(ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: 0, head_hash: head_data(1, 5).hash() }, next_imported_hash_position: 1, }) ); assert_eq!(ParasInfo::::get(ParaId(UNTRACKED_PARACHAIN_ID)), None,); assert_eq!( ParasInfo::::get(ParaId(2)), Some(ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: 0, head_hash: head_data(1, 5).hash() }, next_imported_hash_position: 1, }) ); assert_eq!( System::::events(), vec![ EventRecord { phase: Phase::Initialization, event: 
TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: head_data(1, 5).hash(), }), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UntrackedParachainRejected { parachain: ParaId(UNTRACKED_PARACHAIN_ID), }), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(2), parachain_head_hash: head_data(1, 5).hash(), }), topics: vec![], } ], ); }); } #[test] fn does_nothing_when_already_imported_this_head_at_previous_relay_header() { let (state_root, proof, parachains) = prepare_parachain_heads_proof(vec![(1, head_data(1, 0))]); run_test(|| { // import head#0 of parachain#1 at relay block#0 initialize(state_root); assert_ok!(import_parachain_1_head(0, state_root, parachains.clone(), proof.clone())); assert_eq!(ParasInfo::::get(ParaId(1)), Some(initial_best_head(1))); assert_eq!( System::::events(), vec![EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: initial_best_head(1).best_head_hash.head_hash, }), topics: vec![], }], ); // try to import head#0 of parachain#1 at relay block#1 // => call succeeds, but nothing is changed proceed(1, state_root); assert_ok!(import_parachain_1_head(1, state_root, parachains, proof)); assert_eq!(ParasInfo::::get(ParaId(1)), Some(initial_best_head(1))); assert_eq!( System::::events(), vec![ EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: initial_best_head(1).best_head_hash.head_hash, }), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::RejectedObsoleteParachainHead { parachain: ParaId(1), parachain_head_hash: initial_best_head(1).best_head_hash.head_hash, }), topics: vec![], } ], ); }); } #[test] fn does_nothing_when_already_imported_head_at_better_relay_header() { let (state_root_5, proof_5, parachains_5) = prepare_parachain_heads_proof(vec![(1, head_data(1, 5))]); let (state_root_10, proof_10, parachains_10) = prepare_parachain_heads_proof(vec![(1, head_data(1, 10))]); run_test(|| { // start with relay block #0 initialize(state_root_5); // head#10 of parachain#1 at relay block#1 proceed(1, state_root_10); assert_ok!(import_parachain_1_head(1, state_root_10, parachains_10, proof_10)); assert_eq!( ParasInfo::::get(ParaId(1)), Some(ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: 1, head_hash: head_data(1, 10).hash() }, next_imported_hash_position: 1, }) ); assert_eq!( System::::events(), vec![EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: head_data(1, 10).hash(), }), topics: vec![], }], ); // now try to import head#5 at relay block#0 // => nothing is changed, because better head has already been imported assert_ok!(import_parachain_1_head(0, state_root_5, parachains_5, proof_5)); assert_eq!( ParasInfo::::get(ParaId(1)), Some(ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: 1, head_hash: head_data(1, 10).hash() }, next_imported_hash_position: 1, }) ); assert_eq!( System::::events(), vec![ EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: head_data(1, 10).hash(), }), topics: vec![], }, EventRecord { phase: 
Phase::Initialization, event: TestEvent::Parachains(Event::RejectedObsoleteParachainHead { parachain: ParaId(1), parachain_head_hash: head_data(1, 5).hash(), }), topics: vec![], } ], ); }); } #[test] fn does_nothing_when_parachain_head_is_too_large() { let (state_root, proof, parachains) = prepare_parachain_heads_proof(vec![(1, head_data(1, 5)), (2, large_head_data(1, 5))]); run_test(|| { // start with relay block #0 and try to import head#5 of parachain#1 and untracked // parachain initialize(state_root); let result = Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (0, test_relay_header(0, state_root).hash()), parachains, proof, ); assert_ok!(result); assert_eq!( ParasInfo::::get(ParaId(1)), Some(ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: 0, head_hash: head_data(1, 5).hash() }, next_imported_hash_position: 1, }) ); assert_eq!(ParasInfo::::get(ParaId(2)), None); assert_eq!( System::::events(), vec![ EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::UpdatedParachainHead { parachain: ParaId(1), parachain_head_hash: head_data(1, 5).hash(), }), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::RejectedLargeParachainHead { parachain: ParaId(2), parachain_head_hash: large_head_data(1, 5).hash(), parachain_head_size: large_head_data(1, 5).encoded_size() as u32, }), topics: vec![], }, ], ); }); } #[test] fn prunes_old_heads() { run_test(|| { let heads_to_keep = crate::mock::HeadsToKeep::get(); // import exactly `HeadsToKeep` headers for i in 0..heads_to_keep { let (state_root, proof, parachains) = prepare_parachain_heads_proof(vec![(1, head_data(1, i))]); if i == 0 { initialize(state_root); } else { proceed(i, state_root); } let expected_weight = weight_of_import_parachain_1_head(&proof, false); let result = import_parachain_1_head(i, state_root, parachains, proof); assert_ok!(result); assert_eq!(result.expect("checked above").actual_weight, Some(expected_weight)); } // nothing is pruned yet for i in 0..heads_to_keep { assert!(ImportedParaHeads::::get(ParaId(1), head_data(1, i).hash()) .is_some()); } // import next relay chain header and next parachain head let (state_root, proof, parachains) = prepare_parachain_heads_proof(vec![(1, head_data(1, heads_to_keep))]); proceed(heads_to_keep, state_root); let expected_weight = weight_of_import_parachain_1_head(&proof, true); let result = import_parachain_1_head(heads_to_keep, state_root, parachains, proof); assert_ok!(result); assert_eq!(result.expect("checked above").actual_weight, Some(expected_weight)); // and the head#0 is pruned assert!( ImportedParaHeads::::get(ParaId(1), head_data(1, 0).hash()).is_none() ); for i in 1..=heads_to_keep { assert!(ImportedParaHeads::::get(ParaId(1), head_data(1, i).hash()) .is_some()); } }); } #[test] fn fails_on_unknown_relay_chain_block() { let (state_root, proof, parachains) = prepare_parachain_heads_proof(vec![(1, head_data(1, 5))]); run_test(|| { // start with relay block #0 initialize(state_root); // try to import head#5 of parachain#1 at unknown relay chain block #1 assert_noop!( import_parachain_1_head(1, state_root, parachains, proof), Error::::UnknownRelayChainBlock ); }); } #[test] fn fails_on_invalid_storage_proof() { let (_state_root, proof, parachains) = prepare_parachain_heads_proof(vec![(1, head_data(1, 5))]); run_test(|| { // start with relay block #0 initialize(Default::default()); // try to import head#5 of parachain#1 at relay chain block #0 assert_noop!( import_parachain_1_head(0, 
Default::default(), parachains, proof), Error::::InvalidStorageProof ); }); } #[test] fn is_not_rewriting_existing_head_if_failed_to_read_updated_head() { let (state_root_5, proof_5, parachains_5) = prepare_parachain_heads_proof(vec![(1, head_data(1, 5))]); let (state_root_10_at_20, proof_10_at_20, parachains_10_at_20) = prepare_parachain_heads_proof(vec![(2, head_data(2, 10))]); let (state_root_10_at_30, proof_10_at_30, parachains_10_at_30) = prepare_parachain_heads_proof(vec![(1, head_data(1, 10))]); run_test(|| { // we've already imported head#5 of parachain#1 at relay block#10 initialize(state_root_5); import_parachain_1_head(0, state_root_5, parachains_5, proof_5).expect("ok"); assert_eq!( Pallet::::best_parachain_head(ParaId(1)), Some(head_data(1, 5)) ); // then if someone is pretending to provide updated head#10 of parachain#1 at relay // block#20, but fails to do that // // => we'll leave previous value proceed(20, state_root_10_at_20); assert_ok!(Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (20, test_relay_header(20, state_root_10_at_20).hash()), parachains_10_at_20, proof_10_at_20, ),); assert_eq!( Pallet::::best_parachain_head(ParaId(1)), Some(head_data(1, 5)) ); // then if someone is pretending to provide updated head#10 of parachain#1 at relay // block#30, and actualy provides it // // => we'll update value proceed(30, state_root_10_at_30); assert_ok!(Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (30, test_relay_header(30, state_root_10_at_30).hash()), parachains_10_at_30, proof_10_at_30, ),); assert_eq!( Pallet::::best_parachain_head(ParaId(1)), Some(head_data(1, 10)) ); }); } #[test] fn storage_keys_computed_properly() { assert_eq!( ParasInfo::::storage_map_final_key(ParaId(42)).to_vec(), ParasInfoKeyProvider::final_key("Parachains", &ParaId(42)).0 ); assert_eq!( ImportedParaHeads::::storage_double_map_final_key( ParaId(42), ParaHash::from([21u8; 32]) ) .to_vec(), ImportedParaHeadsKeyProvider::final_key( "Parachains", &ParaId(42), &ParaHash::from([21u8; 32]) ) .0, ); } #[test] fn ignores_parachain_head_if_it_is_missing_from_storage_proof() { let (state_root, proof, _) = prepare_parachain_heads_proof(vec![(1, head_data(1, 0))]); let parachains = vec![(ParaId(2), Default::default())]; run_test(|| { initialize(state_root); assert_ok!(Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (0, test_relay_header(0, state_root).hash()), parachains, proof, )); assert_eq!( System::::events(), vec![EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::MissingParachainHead { parachain: ParaId(2), }), topics: vec![], }], ); }); } #[test] fn ignores_parachain_head_if_parachain_head_hash_is_wrong() { let (state_root, proof, _) = prepare_parachain_heads_proof(vec![(1, head_data(1, 0))]); let parachains = vec![(ParaId(1), head_data(1, 10).hash())]; run_test(|| { initialize(state_root); assert_ok!(Pallet::::submit_parachain_heads( RuntimeOrigin::signed(1), (0, test_relay_header(0, state_root).hash()), parachains, proof, )); assert_eq!( System::::events(), vec![EventRecord { phase: Phase::Initialization, event: TestEvent::Parachains(Event::IncorrectParachainHeadHash { parachain: ParaId(1), parachain_head_hash: head_data(1, 10).hash(), actual_parachain_head_hash: head_data(1, 0).hash(), }), topics: vec![], }], ); }); } generate_owned_bridge_module_tests!(BasicOperatingMode::Normal, BasicOperatingMode::Halted); }