# Cargo manifest for `pallet-bridge-beefy`.
[package]
name = "pallet-bridge-beefy"
version = "0.1.0"
description = "Module implementing BEEFY on-chain light client used for bridging consensus of substrate-based chains."
authors = ["Parity Technologies <[email protected]>"]
edition = "2021"
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
[lints]
workspace = true
[dependencies]
codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false }
log = { workspace = true }
scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
serde = { optional = true, workspace = true }
# Bridge Dependencies
bp-beefy = { path = "../../primitives/beefy", default-features = false }
bp-runtime = { path = "../../primitives/runtime", default-features = false }
# Substrate Dependencies
frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
[dev-dependencies]
sp-consensus-beefy = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" }
mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.3.2" }
pallet-beefy-mmr = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" }
pallet-mmr = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" }
rand = "0.8"
sp-io = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" }
bp-test-utils = { path = "../../primitives/test-utils" }
[features]
default = [ "std" ]
std = [
"bp-beefy/std",
"bp-runtime/std",
"codec/std",
"frame-support/std",
"frame-system/std",
"log/std",
"scale-info/std",
"serde",
"sp-core/std",
"sp-runtime/std",
"sp-std/std",
]
try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime" ]
// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! BEEFY bridge pallet.
//!
//! This pallet is an on-chain BEEFY light client for Substrate-based chains that are using the
//! following pallets bundle: `pallet-mmr`, `pallet-beefy` and `pallet-beefy-mmr`.
//!
//! The pallet is able to verify MMR leaf proofs and BEEFY commitments, so it has access
//! to the following data of the bridged chain:
//!
//! - header hashes
//! - changes of BEEFY authorities
//! - extra data of MMR leafs
//!
//! Given the header hash, other pallets are able to verify header-based proofs
//! (e.g. storage proofs, transaction inclusion proofs, etc.).
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
use bp_beefy::{ChainWithBeefy, InitializationData};
use sp_std::{boxed::Box, prelude::*};
// Re-export in crate namespace for `construct_runtime!`
pub use pallet::*;
mod utils;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod mock_chain;
/// The target that will be used when publishing logs related to this pallet.
pub const LOG_TARGET: &str = "runtime::bridge-beefy";

/// Configured bridged chain.
pub type BridgedChain<T, I> = <T as Config<I>>::BridgedChain;
/// Block number, used by configured bridged chain.
pub type BridgedBlockNumber<T, I> = bp_runtime::BlockNumberOf<BridgedChain<T, I>>;
/// Block hash, used by configured bridged chain.
pub type BridgedBlockHash<T, I> = bp_runtime::HashOf<BridgedChain<T, I>>;

/// Pallet initialization data.
pub type InitializationDataOf<T, I> =
	InitializationData<BridgedBlockNumber<T, I>, bp_beefy::MmrHashOf<BridgedChain<T, I>>>;

/// BEEFY commitment hasher, used by configured bridged chain.
pub type BridgedBeefyCommitmentHasher<T, I> = bp_beefy::BeefyCommitmentHasher<BridgedChain<T, I>>;
/// BEEFY validator id, used by configured bridged chain.
pub type BridgedBeefyAuthorityId<T, I> = bp_beefy::BeefyAuthorityIdOf<BridgedChain<T, I>>;
/// BEEFY validator set, used by configured bridged chain.
pub type BridgedBeefyAuthoritySet<T, I> = bp_beefy::BeefyAuthoritySetOf<BridgedChain<T, I>>;
/// BEEFY authority set, used by configured bridged chain.
pub type BridgedBeefyAuthoritySetInfo<T, I> = bp_beefy::BeefyAuthoritySetInfoOf<BridgedChain<T, I>>;
/// BEEFY signed commitment, used by configured bridged chain.
pub type BridgedBeefySignedCommitment<T, I> = bp_beefy::BeefySignedCommitmentOf<BridgedChain<T, I>>;
/// MMR hashing algorithm, used by configured bridged chain.
pub type BridgedMmrHashing<T, I> = bp_beefy::MmrHashingOf<BridgedChain<T, I>>;
/// MMR hashing output type of `BridgedMmrHashing<T, I>`.
pub type BridgedMmrHash<T, I> = bp_beefy::MmrHashOf<BridgedChain<T, I>>;
/// The type of the MMR leaf extra data used by the configured bridged chain.
pub type BridgedBeefyMmrLeafExtra<T, I> = bp_beefy::BeefyMmrLeafExtraOf<BridgedChain<T, I>>;
/// BEEFY MMR proof type used by the pallet.
pub type BridgedMmrProof<T, I> = bp_beefy::MmrProofOf<BridgedChain<T, I>>;
/// MMR leaf type, used by configured bridged chain.
pub type BridgedBeefyMmrLeaf<T, I> = bp_beefy::BeefyMmrLeafOf<BridgedChain<T, I>>;
/// Imported commitment data, stored by the pallet.
pub type ImportedCommitment<T, I> = bp_beefy::ImportedCommitment<
	BridgedBlockNumber<T, I>,
	BridgedBlockHash<T, I>,
	BridgedMmrHash<T, I>,
>;

/// Some high level info about the imported commitments.
#[derive(codec::Encode, codec::Decode, scale_info::TypeInfo)]
pub struct ImportedCommitmentsInfoData<BlockNumber> {
	/// Best known block number, provided in a BEEFY commitment. However this is not
	/// the best proven block. The best proven block is this block's parent.
	best_block_number: BlockNumber,
	/// The head of the `ImportedBlockNumbers` ring buffer.
	next_block_number_index: u32,
}
#[frame_support::pallet(dev_mode)]
pub mod pallet {
use super::*;
use bp_runtime::{BasicOperatingMode, OwnedBridgeModule};
use frame_support::pallet_prelude::*;
use frame_system::pallet_prelude::*;
	#[pallet::config]
	pub trait Config<I: 'static = ()>: frame_system::Config {
		/// The upper bound on the number of requests allowed by the pallet.
		///
		/// A request refers to an action which writes a header to storage.
		///
		/// Once this bound is reached the pallet will reject all commitments
		/// until the request count has decreased.
		#[pallet::constant]
		type MaxRequests: Get<u32>;

		/// Maximal number of imported commitments to keep in the storage.
		///
		/// The setting is there to prevent growing the on-chain state indefinitely. Note
		/// the setting does not relate to block numbers - we will simply keep as much items
		/// in the storage, so it doesn't guarantee any fixed timeframe for imported commitments.
		///
		/// NOTE(review): must be non-zero - `submit_commitment` computes
		/// `index % CommitmentsToKeep::get()` and would panic on zero.
		#[pallet::constant]
		type CommitmentsToKeep: Get<u32>;

		/// The chain we are bridging to here.
		type BridgedChain: ChainWithBeefy;
	}
	/// The pallet struct, generated by the FRAME `pallet` macro.
	#[pallet::pallet]
	#[pallet::without_storage_info]
	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);
#[pallet::hooks]
impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {
fn on_initialize(_n: BlockNumberFor<T>) -> frame_support::weights::Weight {
<RequestCount<T, I>>::mutate(|count| *count = count.saturating_sub(1));
Weight::from_parts(0, 0)
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
}
}
	// Wire the shared owner/operating-mode machinery to this pallet's storage items.
	impl<T: Config<I>, I: 'static> OwnedBridgeModule<T> for Pallet<T, I> {
		const LOG_TARGET: &'static str = LOG_TARGET;
		type OwnerStorage = PalletOwner<T, I>;
		type OperatingMode = BasicOperatingMode;
		type OperatingModeStorage = PalletOperatingMode<T, I>;
	}
#[pallet::call]
impl<T: Config<I>, I: 'static> Pallet<T, I>
where
BridgedMmrHashing<T, I>: 'static + Send + Sync,
{
/// Initialize pallet with BEEFY authority set and best known finalized block number.
#[pallet::call_index(0)]
#[pallet::weight((T::DbWeight::get().reads_writes(2, 3), DispatchClass::Operational))]
pub fn initialize(
origin: OriginFor<T>,
init_data: InitializationDataOf<T, I>,
) -> DispatchResult {
Self::ensure_owner_or_root(origin)?;
let is_initialized = <ImportedCommitmentsInfo<T, I>>::exists();
ensure!(!is_initialized, <Error<T, I>>::AlreadyInitialized);
log::info!(target: LOG_TARGET, "Initializing bridge BEEFY pallet: {:?}", init_data);
Ok(initialize::<T, I>(init_data)?)
}
/// Change `PalletOwner`.
///
/// May only be called either by root, or by `PalletOwner`.
#[pallet::call_index(1)]
#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
pub fn set_owner(origin: OriginFor<T>, new_owner: Option<T::AccountId>) -> DispatchResult {
<Self as OwnedBridgeModule<_>>::set_owner(origin, new_owner)
}
/// Halt or resume all pallet operations.
///
/// May only be called either by root, or by `PalletOwner`.
#[pallet::call_index(2)]
#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
pub fn set_operating_mode(
origin: OriginFor<T>,
operating_mode: BasicOperatingMode,
) -> DispatchResult {
<Self as OwnedBridgeModule<_>>::set_operating_mode(origin, operating_mode)
}
/// Submit a commitment generated by BEEFY authority set.
///
/// It will use the underlying storage pallet to fetch information about the current
/// authority set and best finalized block number in order to verify that the commitment
/// is valid.
///
/// If successful in verification, it will update the underlying storage with the data
/// provided in the newly submitted commitment.
#[pallet::call_index(3)]
#[pallet::weight(0)]
pub fn submit_commitment(
origin: OriginFor<T>,
commitment: BridgedBeefySignedCommitment<T, I>,
validator_set: BridgedBeefyAuthoritySet<T, I>,
mmr_leaf: Box<BridgedBeefyMmrLeaf<T, I>>,
mmr_proof: BridgedMmrProof<T, I>,
) -> DispatchResult
where
BridgedBeefySignedCommitment<T, I>: Clone,
{
Self::ensure_not_halted().map_err(Error::<T, I>::BridgeModule)?;
ensure_signed(origin)?;
ensure!(Self::request_count() < T::MaxRequests::get(), <Error<T, I>>::TooManyRequests);
// Ensure that the commitment is for a better block.
let commitments_info =
ImportedCommitmentsInfo::<T, I>::get().ok_or(Error::<T, I>::NotInitialized)?;
ensure!(
commitment.commitment.block_number > commitments_info.best_block_number,
Error::<T, I>::OldCommitment
);
// Verify commitment and mmr leaf.
let current_authority_set_info = CurrentAuthoritySetInfo::<T, I>::get();
let mmr_root = utils::verify_commitment::<T, I>(
&commitment,
&current_authority_set_info,
&validator_set,
)?;
utils::verify_beefy_mmr_leaf::<T, I>(&mmr_leaf, mmr_proof, mmr_root)?;
// Update request count.
RequestCount::<T, I>::mutate(|count| *count += 1);
// Update authority set if needed.
if mmr_leaf.beefy_next_authority_set.id > current_authority_set_info.id {
CurrentAuthoritySetInfo::<T, I>::put(mmr_leaf.beefy_next_authority_set);
}
// Import commitment.
let block_number_index = commitments_info.next_block_number_index;
let to_prune = ImportedBlockNumbers::<T, I>::try_get(block_number_index);
ImportedCommitments::<T, I>::insert(
commitment.commitment.block_number,
ImportedCommitment::<T, I> {
parent_number_and_hash: mmr_leaf.parent_number_and_hash,
mmr_root,
},
);
ImportedBlockNumbers::<T, I>::insert(
block_number_index,
commitment.commitment.block_number,
);
ImportedCommitmentsInfo::<T, I>::put(ImportedCommitmentsInfoData {
best_block_number: commitment.commitment.block_number,
next_block_number_index: (block_number_index + 1) % T::CommitmentsToKeep::get(),
});
if let Ok(old_block_number) = to_prune {
log::debug!(
target: LOG_TARGET,
"Pruning commitment for old block: {:?}.",
old_block_number
);
ImportedCommitments::<T, I>::remove(old_block_number);
}
log::info!(
target: LOG_TARGET,
"Successfully imported commitment for block {:?}",
commitment.commitment.block_number,
);
Ok(())
}
}
	/// The current number of requests which have written to storage.
	///
	/// If the `RequestCount` hits `MaxRequests`, no more calls will be allowed to the pallet until
	/// the request capacity is increased.
	///
	/// The `RequestCount` is decreased by one at the beginning of every block. This is to ensure
	/// that the pallet can always make progress.
	#[pallet::storage]
	#[pallet::getter(fn request_count)]
	pub type RequestCount<T: Config<I>, I: 'static = ()> = StorageValue<_, u32, ValueQuery>;

	/// High level info about the imported commitments.
	///
	/// Contains the following info:
	/// - best known block number of the bridged chain, finalized by BEEFY
	/// - the head of the `ImportedBlockNumbers` ring buffer
	///
	/// Absence of this value means the pallet has not been initialized yet.
	#[pallet::storage]
	pub type ImportedCommitmentsInfo<T: Config<I>, I: 'static = ()> =
		StorageValue<_, ImportedCommitmentsInfoData<BridgedBlockNumber<T, I>>>;

	/// A ring buffer containing the block numbers of the commitments that we have imported,
	/// ordered by the insertion time.
	///
	/// Keys are ring buffer indices in `0..CommitmentsToKeep`.
	#[pallet::storage]
	pub(super) type ImportedBlockNumbers<T: Config<I>, I: 'static = ()> =
		StorageMap<_, Identity, u32, BridgedBlockNumber<T, I>>;

	/// All the commitments that we have imported and haven't been pruned yet.
	#[pallet::storage]
	pub type ImportedCommitments<T: Config<I>, I: 'static = ()> =
		StorageMap<_, Blake2_128Concat, BridgedBlockNumber<T, I>, ImportedCommitment<T, I>>;

	/// The current BEEFY authority set at the bridged chain.
	#[pallet::storage]
	pub type CurrentAuthoritySetInfo<T: Config<I>, I: 'static = ()> =
		StorageValue<_, BridgedBeefyAuthoritySetInfo<T, I>, ValueQuery>;

	/// Optional pallet owner.
	///
	/// Pallet owner has the right to halt all pallet operations and then resume it. If it is
	/// `None`, then there are no direct ways to halt/resume pallet operations, but other
	/// runtime methods may still be used to do that (i.e. `democracy::referendum` to update halt
	/// flag directly or calling `halt_operations`).
	#[pallet::storage]
	pub type PalletOwner<T: Config<I>, I: 'static = ()> =
		StorageValue<_, T::AccountId, OptionQuery>;

	/// The current operating mode of the pallet.
	///
	/// Depending on the mode either all, or no transactions will be allowed.
	#[pallet::storage]
	pub type PalletOperatingMode<T: Config<I>, I: 'static = ()> =
		StorageValue<_, BasicOperatingMode, ValueQuery>;
	#[pallet::genesis_config]
	#[derive(frame_support::DefaultNoBound)]
	pub struct GenesisConfig<T: Config<I>, I: 'static = ()> {
		/// Optional module owner account.
		pub owner: Option<T::AccountId>,
		/// Optional module initialization data.
		pub init_data: Option<InitializationDataOf<T, I>>,
	}

	#[pallet::genesis_build]
	impl<T: Config<I>, I: 'static> BuildGenesisConfig for GenesisConfig<T, I> {
		fn build(&self) {
			// Set the owner first - it is independent of whether init data was supplied.
			if let Some(ref owner) = self.owner {
				<PalletOwner<T, I>>::put(owner);
			}

			if let Some(init_data) = self.init_data.clone() {
				// Invalid genesis data is a chain-spec bug => panic at genesis build.
				initialize::<T, I>(init_data)
					.expect("invalid initialization data of BEEFY bridge pallet");
			} else {
				// Since the bridge hasn't been initialized we shouldn't allow anyone to perform
				// transactions.
				<PalletOperatingMode<T, I>>::put(BasicOperatingMode::Halted);
			}
		}
	}
	#[pallet::error]
	pub enum Error<T, I = ()> {
		/// The pallet has not been initialized yet.
		NotInitialized,
		/// The pallet has already been initialized.
		AlreadyInitialized,
		/// Invalid initial authority set.
		InvalidInitialAuthoritySet,
		/// There are too many requests for the current window to handle.
		TooManyRequests,
		/// The imported commitment is older than the best commitment known to the pallet.
		OldCommitment,
		/// The commitment is signed by unknown validator set.
		InvalidCommitmentValidatorSetId,
		/// The id of the provided validator set is invalid.
		InvalidValidatorSetId,
		/// The number of signatures in the commitment is invalid.
		InvalidCommitmentSignaturesLen,
		/// The number of validator ids provided is invalid.
		InvalidValidatorSetLen,
		/// There aren't enough correct signatures in the commitment to finalize the block.
		NotEnoughCorrectSignatures,
		/// MMR root is missing from the commitment.
		MmrRootMissingFromCommitment,
		/// MMR proof verification has failed.
		MmrProofVerificationFailed,
		/// The validators are not matching the merkle tree root of the authority set.
		InvalidValidatorSetRoot,
		/// Error generated by the `OwnedBridgeModule` trait (e.g. pallet halted).
		BridgeModule(bp_runtime::OwnedBridgeModuleError),
	}
/// Initialize pallet with given parameters.
pub(super) fn initialize<T: Config<I>, I: 'static>(
init_data: InitializationDataOf<T, I>,
) -> Result<(), Error<T, I>> {
if init_data.authority_set.len == 0 {
return Err(Error::<T, I>::InvalidInitialAuthoritySet)
}
CurrentAuthoritySetInfo::<T, I>::put(init_data.authority_set);
<PalletOperatingMode<T, I>>::put(init_data.operating_mode);
ImportedCommitmentsInfo::<T, I>::put(ImportedCommitmentsInfoData {
best_block_number: init_data.best_block_number,
next_block_number_index: 0,
});
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use bp_runtime::{BasicOperatingMode, OwnedBridgeModuleError};
use bp_test_utils::generate_owned_bridge_module_tests;
use frame_support::{assert_noop, assert_ok, traits::Get};
use mock::*;
use mock_chain::*;
use sp_consensus_beefy::mmr::BeefyAuthoritySet;
use sp_runtime::DispatchError;
fn next_block() {
use frame_support::traits::OnInitialize;
let current_number = frame_system::Pallet::<TestRuntime>::block_number();
frame_system::Pallet::<TestRuntime>::set_block_number(current_number + 1);
let _ = Pallet::<TestRuntime>::on_initialize(current_number);
}
fn import_header_chain(headers: Vec<HeaderAndCommitment>) {
for header in headers {
if header.commitment.is_some() {
assert_ok!(import_commitment(header));
}
}
}
#[test]
fn fails_to_initialize_if_already_initialized() {
run_test_with_initialize(32, || {
assert_noop!(
Pallet::<TestRuntime>::initialize(
RuntimeOrigin::root(),
InitializationData {
operating_mode: BasicOperatingMode::Normal,
best_block_number: 0,
authority_set: BeefyAuthoritySet {
id: 0,
len: 1,
keyset_commitment: [0u8; 32].into()
}
}
),
Error::<TestRuntime, ()>::AlreadyInitialized,
);
});
}
#[test]
fn fails_to_initialize_if_authority_set_is_empty() {
run_test(|| {
assert_noop!(
Pallet::<TestRuntime>::initialize(
RuntimeOrigin::root(),
InitializationData {
operating_mode: BasicOperatingMode::Normal,
best_block_number: 0,
authority_set: BeefyAuthoritySet {
id: 0,
len: 0,
keyset_commitment: [0u8; 32].into()
}
}
),
Error::<TestRuntime, ()>::InvalidInitialAuthoritySet,
);
});
}
#[test]
fn fails_to_import_commitment_if_halted() {
run_test_with_initialize(1, || {
assert_ok!(Pallet::<TestRuntime>::set_operating_mode(
RuntimeOrigin::root(),
BasicOperatingMode::Halted
));
assert_noop!(
import_commitment(ChainBuilder::new(1).append_finalized_header().to_header()),
Error::<TestRuntime, ()>::BridgeModule(OwnedBridgeModuleError::Halted),
);
})
}
#[test]
fn fails_to_import_commitment_if_too_many_requests() {
run_test_with_initialize(1, || {
let max_requests = <<TestRuntime as Config>::MaxRequests as Get<u32>>::get() as u64;
let mut chain = ChainBuilder::new(1);
for _ in 0..max_requests + 2 {
chain = chain.append_finalized_header();
}
// import `max_request` headers
for i in 0..max_requests {
assert_ok!(import_commitment(chain.header(i + 1)));
}
// try to import next header: it fails because we are no longer accepting commitments
assert_noop!(
import_commitment(chain.header(max_requests + 1)),
Error::<TestRuntime, ()>::TooManyRequests,
);
// when next block is "started", we allow import of next header
next_block();
assert_ok!(import_commitment(chain.header(max_requests + 1)));
// but we can't import two headers until next block and so on
assert_noop!(
import_commitment(chain.header(max_requests + 2)),
Error::<TestRuntime, ()>::TooManyRequests,
);
})
}
#[test]
fn fails_to_import_commitment_if_not_initialized() {
run_test(|| {
assert_noop!(
import_commitment(ChainBuilder::new(1).append_finalized_header().to_header()),
Error::<TestRuntime, ()>::NotInitialized,
);
})
}
#[test]
fn submit_commitment_works_with_long_chain_with_handoffs() {
run_test_with_initialize(3, || {
let chain = ChainBuilder::new(3)
.append_finalized_header()
.append_default_headers(16) // 2..17
.append_finalized_header() // 18
.append_default_headers(16) // 19..34
.append_handoff_header(9) // 35
.append_default_headers(8) // 36..43
.append_finalized_header() // 44
.append_default_headers(8) // 45..52
.append_handoff_header(17) // 53
.append_default_headers(4) // 54..57
.append_finalized_header() // 58
.append_default_headers(4); // 59..63
import_header_chain(chain.to_chain());
assert_eq!(
ImportedCommitmentsInfo::<TestRuntime>::get().unwrap().best_block_number,
58
);
assert_eq!(CurrentAuthoritySetInfo::<TestRuntime>::get().id, 2);
assert_eq!(CurrentAuthoritySetInfo::<TestRuntime>::get().len, 17);
let imported_commitment = ImportedCommitments::<TestRuntime>::get(58).unwrap();
assert_eq!(
imported_commitment,
bp_beefy::ImportedCommitment {
parent_number_and_hash: (57, chain.header(57).header.hash()),
mmr_root: chain.header(58).mmr_root,
},
);
})
}
	#[test]
	fn commitment_pruning_works() {
		run_test_with_initialize(3, || {
			let commitments_to_keep = <TestRuntime as Config<()>>::CommitmentsToKeep::get();
			let commitments_to_import: Vec<HeaderAndCommitment> = ChainBuilder::new(3)
				.append_finalized_headers(commitments_to_keep as usize + 2)
				.to_chain();

			// import exactly `CommitmentsToKeep` commitments
			for index in 0..commitments_to_keep {
				next_block();
				import_commitment(commitments_to_import[index as usize].clone())
					.expect("must succeed");
				// the ring buffer head advances (mod `CommitmentsToKeep`) on every import
				assert_eq!(
					ImportedCommitmentsInfo::<TestRuntime>::get().unwrap().next_block_number_index,
					(index + 1) % commitments_to_keep
				);
			}

			// ensure that all commitments are in the storage
			assert_eq!(
				ImportedCommitmentsInfo::<TestRuntime>::get().unwrap().best_block_number,
				commitments_to_keep as TestBridgedBlockNumber
			);
			assert_eq!(
				ImportedCommitmentsInfo::<TestRuntime>::get().unwrap().next_block_number_index,
				0
			);
			for index in 0..commitments_to_keep {
				assert!(ImportedCommitments::<TestRuntime>::get(
					index as TestBridgedBlockNumber + 1
				)
				.is_some());
				assert_eq!(
					ImportedBlockNumbers::<TestRuntime>::get(index),
					Some(Into::into(index + 1)),
				);
			}

			// import next commitment
			next_block();
			import_commitment(commitments_to_import[commitments_to_keep as usize].clone())
				.expect("must succeed");
			assert_eq!(
				ImportedCommitmentsInfo::<TestRuntime>::get().unwrap().next_block_number_index,
				1
			);
			assert!(ImportedCommitments::<TestRuntime>::get(
				commitments_to_keep as TestBridgedBlockNumber + 1
			)
			.is_some());
			assert_eq!(
				ImportedBlockNumbers::<TestRuntime>::get(0),
				Some(Into::into(commitments_to_keep + 1)),
			);
			// the side effect of the import is that the commitment#1 is pruned
			assert!(ImportedCommitments::<TestRuntime>::get(1).is_none());

			// import next commitment
			next_block();
			import_commitment(commitments_to_import[commitments_to_keep as usize + 1].clone())
				.expect("must succeed");
			assert_eq!(
				ImportedCommitmentsInfo::<TestRuntime>::get().unwrap().next_block_number_index,
				2
			);
			assert!(ImportedCommitments::<TestRuntime>::get(
				commitments_to_keep as TestBridgedBlockNumber + 2
			)
			.is_some());
			assert_eq!(
				ImportedBlockNumbers::<TestRuntime>::get(1),
				Some(Into::into(commitments_to_keep + 2)),
			);
			// the side effect of the import is that the commitment#2 is pruned
			assert!(ImportedCommitments::<TestRuntime>::get(1).is_none());
			assert!(ImportedCommitments::<TestRuntime>::get(2).is_none());
		});
	}

	// Shared owner/operating-mode tests from `bp-test-utils`.
	generate_owned_bridge_module_tests!(BasicOperatingMode::Normal, BasicOperatingMode::Halted);
}
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
use crate as beefy;
use crate::{
utils::get_authorities_mmr_root, BridgedBeefyAuthoritySet, BridgedBeefyAuthoritySetInfo,
BridgedBeefyCommitmentHasher, BridgedBeefyMmrLeafExtra, BridgedBeefySignedCommitment,
BridgedMmrHash, BridgedMmrHashing, BridgedMmrProof,
};
use bp_beefy::{BeefyValidatorSignatureOf, ChainWithBeefy, Commitment, MmrDataOrHash};
use bp_runtime::{BasicOperatingMode, Chain, ChainId};
use codec::Encode;
use frame_support::{construct_runtime, derive_impl, weights::Weight};
use sp_core::{sr25519::Signature, Pair};
use sp_runtime::{
testing::{Header, H256},
traits::{BlakeTwo256, Hash},
};
pub use sp_consensus_beefy::ecdsa_crypto::{AuthorityId as BeefyId, Pair as BeefyPair};
use sp_core::crypto::Wraps;
use sp_runtime::traits::Keccak256;
/// Account id type used by the test runtime.
pub type TestAccountId = u64;
/// Block number of the mocked bridged chain.
pub type TestBridgedBlockNumber = u64;
/// Block hash of the mocked bridged chain.
pub type TestBridgedBlockHash = H256;
/// Header of the mocked bridged chain.
pub type TestBridgedHeader = Header;
/// BEEFY authority set info, instantiated for the test runtime.
pub type TestBridgedAuthoritySetInfo = BridgedBeefyAuthoritySetInfo<TestRuntime, ()>;
/// BEEFY validator set, instantiated for the test runtime.
pub type TestBridgedValidatorSet = BridgedBeefyAuthoritySet<TestRuntime, ()>;
/// Signed BEEFY commitment, instantiated for the test runtime.
pub type TestBridgedCommitment = BridgedBeefySignedCommitment<TestRuntime, ()>;
/// Single validator signature within a commitment.
pub type TestBridgedValidatorSignature = BeefyValidatorSignatureOf<TestBridgedChain>;
/// Hasher used for BEEFY commitments in tests.
pub type TestBridgedCommitmentHasher = BridgedBeefyCommitmentHasher<TestRuntime, ()>;
/// MMR hashing algorithm used in tests.
pub type TestBridgedMmrHashing = BridgedMmrHashing<TestRuntime, ()>;
/// MMR hash type used in tests.
pub type TestBridgedMmrHash = BridgedMmrHash<TestRuntime, ()>;
/// Extra data carried by MMR leafs in tests.
pub type TestBridgedBeefyMmrLeafExtra = BridgedBeefyMmrLeafExtra<TestRuntime, ()>;
/// MMR proof type used in tests.
pub type TestBridgedMmrProof = BridgedMmrProof<TestRuntime, ()>;
/// Raw (non-versioned) MMR leaf of the mocked bridged chain.
pub type TestBridgedRawMmrLeaf = sp_consensus_beefy::mmr::MmrLeaf<
	TestBridgedBlockNumber,
	TestBridgedBlockHash,
	TestBridgedMmrHash,
	TestBridgedBeefyMmrLeafExtra,
>;
/// MMR node: either the leaf data itself or its Keccak256 hash.
pub type TestBridgedMmrNode = MmrDataOrHash<Keccak256, TestBridgedRawMmrLeaf>;
type Block = frame_system::mocking::MockBlock<TestRuntime>;

// Minimal runtime: only `frame_system` and the BEEFY bridge pallet under test.
construct_runtime! {
	pub enum TestRuntime
	{
		System: frame_system::{Pallet, Call, Config<T>, Storage, Event<T>},
		Beefy: beefy::{Pallet},
	}
}

#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for TestRuntime {
	type Block = Block;
}

impl beefy::Config for TestRuntime {
	// Small bounds keep the tests (e.g. rate limiting and pruning) fast.
	type MaxRequests = frame_support::traits::ConstU32<16>;
	type BridgedChain = TestBridgedChain;
	type CommitmentsToKeep = frame_support::traits::ConstU32<16>;
}
/// The chain that is being bridged to in tests.
#[derive(Debug)]
pub struct TestBridgedChain;

impl Chain for TestBridgedChain {
	const ID: ChainId = *b"tbch";

	type BlockNumber = TestBridgedBlockNumber;
	type Hash = H256;
	type Hasher = BlakeTwo256;
	type Header = sp_runtime::testing::Header;

	type AccountId = TestAccountId;
	type Balance = u64;
	type Nonce = u64;
	type Signature = Signature;

	fn max_extrinsic_size() -> u32 {
		// Not exercised by the BEEFY pallet tests.
		unreachable!()
	}
	fn max_extrinsic_weight() -> Weight {
		// Not exercised by the BEEFY pallet tests.
		unreachable!()
	}
}

impl ChainWithBeefy for TestBridgedChain {
	type CommitmentHasher = Keccak256;
	type MmrHashing = Keccak256;
	type MmrHash = <Keccak256 as Hash>::Output;
	type BeefyMmrLeafExtra = ();
	type AuthorityId = BeefyId;
	type AuthorityIdToMerkleLeaf = pallet_beefy_mmr::BeefyEcdsaToEthereum;
}
/// Run test within test runtime.
///
/// Creates fresh (empty) test externalities for every call, so tests are isolated.
pub fn run_test<T>(test: impl FnOnce() -> T) -> T {
	let mut ext = sp_io::TestExternalities::new(Default::default());
	ext.execute_with(test)
}
/// Initialize pallet and run test.
///
/// The pallet is initialized with a deterministic validator set of the given size
/// (set id 0), best bridged block 0 and `Normal` operating mode.
pub fn run_test_with_initialize<T>(initial_validators_count: u32, test: impl FnOnce() -> T) -> T {
	run_test(|| {
		let validators = validator_ids(0, initial_validators_count);
		let authority_set = authority_set_info(0, &validators);

		crate::Pallet::<TestRuntime>::initialize(
			RuntimeOrigin::root(),
			bp_beefy::InitializationData {
				operating_mode: BasicOperatingMode::Normal,
				best_block_number: 0,
				authority_set,
			},
		)
		.expect("initialization data is correct");

		test()
	})
}
/// Import given commitment.
///
/// Submits the header's commitment to the pallet from a plain signed origin.
/// Panics if the header carries no commitment - call this only on finalized headers.
pub fn import_commitment(
	header: crate::mock_chain::HeaderAndCommitment,
) -> sp_runtime::DispatchResult {
	crate::Pallet::<TestRuntime>::submit_commitment(
		RuntimeOrigin::signed(1),
		header
			.commitment
			.expect("thou shall not call import_commitment on header without commitment"),
		header.validator_set,
		Box::new(header.leaf),
		header.leaf_proof,
	)
}
/// Return deterministic BEEFY key pairs for validators `index..index + count`.
///
/// Every key is derived from a fixed seed that only varies in its first 8 bytes,
/// which hold the validator index as little-endian `u64` (identical to its SCALE
/// encoding), so the same index always yields the same pair.
pub fn validator_pairs(index: u32, count: u32) -> Vec<BeefyPair> {
	let mut pairs = Vec::with_capacity(count as usize);
	for validator_index in index..index + count {
		let mut seed = [1u8; 32];
		seed[0..8].copy_from_slice(&u64::from(validator_index).to_le_bytes());
		pairs.push(BeefyPair::from_seed(&seed));
	}
	pairs
}
/// Return identifiers of validators, starting at given index.
///
/// Ids are the public keys of the deterministic pairs from `validator_pairs`, so
/// repeated calls with the same arguments yield the same ids.
pub fn validator_ids(index: u32, count: u32) -> Vec<BeefyId> {
	validator_pairs(index, count).into_iter().map(|pair| pair.public()).collect()
}
/// Build authority set info for the given set id and validator ids, with the keyset
/// commitment computed as the validators' merkle/MMR root.
pub fn authority_set_info(id: u64, validators: &[BeefyId]) -> TestBridgedAuthoritySetInfo {
	let merkle_root = get_authorities_mmr_root::<TestRuntime, (), _>(validators.iter());

	TestBridgedAuthoritySetInfo { id, len: validators.len() as u32, keyset_commitment: merkle_root }
}
/// Sign BEEFY commitment.
///
/// Exactly `signature_count` randomly selected validators (out of `validator_pairs`)
/// sign the hash of the SCALE-encoded commitment; the remaining signature slots stay
/// `None`. Note the selection is random, so repeated calls may pick different signers.
pub fn sign_commitment(
	commitment: Commitment<TestBridgedBlockNumber>,
	validator_pairs: &[BeefyPair],
	signature_count: usize,
) -> TestBridgedCommitment {
	let total_validators = validator_pairs.len();
	let random_validators =
		rand::seq::index::sample(&mut rand::thread_rng(), total_validators, signature_count);

	// Validators sign the commitment hash directly (prehashed ECDSA signing).
	let commitment_hash = TestBridgedCommitmentHasher::hash(&commitment.encode());
	let mut signatures = vec![None; total_validators];
	for validator_idx in random_validators.iter() {
		let validator = &validator_pairs[validator_idx];
		signatures[validator_idx] =
			Some(validator.as_inner_ref().sign_prehashed(commitment_hash.as_fixed_bytes()).into());
	}

	TestBridgedCommitment { commitment, signatures }
}
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Utilities to build bridged chain and BEEFY+MMR structures.
use crate::{
mock::{
sign_commitment, validator_pairs, BeefyPair, TestBridgedBlockNumber, TestBridgedCommitment,
TestBridgedHeader, TestBridgedMmrHash, TestBridgedMmrHashing, TestBridgedMmrNode,
TestBridgedMmrProof, TestBridgedRawMmrLeaf, TestBridgedValidatorSet,
TestBridgedValidatorSignature, TestRuntime,
},
utils::get_authorities_mmr_root,
};
use bp_beefy::{BeefyPayload, Commitment, ValidatorSetId, MMR_ROOT_PAYLOAD_ID};
use codec::Encode;
use pallet_mmr::NodeIndex;
use rand::Rng;
use sp_consensus_beefy::mmr::{BeefyNextAuthoritySet, MmrLeafVersion};
use sp_core::Pair;
use sp_runtime::traits::{Hash, Header as HeaderT};
use std::collections::HashMap;
/// A built bridged-chain header together with everything needed to submit it to the pallet.
#[derive(Debug, Clone)]
pub struct HeaderAndCommitment {
	/// The bridged chain header itself.
	pub header: TestBridgedHeader,
	/// Signed BEEFY commitment, if this header has been "finalized" by the chain builder.
	pub commitment: Option<TestBridgedCommitment>,
	/// The validator set associated with this header.
	pub validator_set: TestBridgedValidatorSet,
	/// MMR leaf built for this header.
	pub leaf: TestBridgedRawMmrLeaf,
	/// Proof of `leaf` in the MMR.
	pub leaf_proof: TestBridgedMmrProof,
	/// MMR root associated with this header (the value tests compare against the
	/// commitment payload - presumably post-append; see `ChainBuilder` for the details).
	pub mmr_root: TestBridgedMmrHash,
}
impl HeaderAndCommitment {
	/// Run `f` over the commitment signatures, if this header has a commitment attached.
	pub fn customize_signatures(
		&mut self,
		f: impl FnOnce(&mut Vec<Option<TestBridgedValidatorSignature>>),
	) {
		match self.commitment.as_mut() {
			Some(signed) => f(&mut signed.signatures),
			None => (),
		}
	}

	/// Mutate the commitment using `f`, then re-sign it with `signature_count` of the given
	/// `validator_pairs`. Does nothing if this header has no commitment attached.
	pub fn customize_commitment(
		&mut self,
		f: impl FnOnce(&mut Commitment<TestBridgedBlockNumber>),
		validator_pairs: &[BeefyPair],
		signature_count: usize,
	) {
		match self.commitment.take() {
			Some(mut signed) => {
				f(&mut signed.commitment);
				self.commitment =
					Some(sign_commitment(signed.commitment, validator_pairs, signature_count));
			},
			None => (),
		}
	}
}
/// Utility for building a bridged chain, together with its BEEFY commitments and MMR.
pub struct ChainBuilder {
	/// All built headers, in ascending order (header#1 is at index 0).
	headers: Vec<HeaderAndCommitment>,
	/// Id of the currently active BEEFY validator set.
	validator_set_id: ValidatorSetId,
	/// Keys of the currently active BEEFY validators.
	validator_keys: Vec<BeefyPair>,
	/// MMR that accumulates one leaf per built header.
	mmr: mmr_lib::MMR<TestBridgedMmrNode, BridgedMmrHashMerge, BridgedMmrStorage>,
}
/// In-memory MMR node storage, used by the chain builder.
struct BridgedMmrStorage {
	/// MMR nodes, keyed by their position within the MMR.
	nodes: HashMap<NodeIndex, TestBridgedMmrNode>,
}
impl mmr_lib::MMRStore<TestBridgedMmrNode> for BridgedMmrStorage {
	fn get_elem(&self, pos: NodeIndex) -> mmr_lib::Result<Option<TestBridgedMmrNode>> {
		// in-memory lookup can't fail
		Ok(self.nodes.get(&pos).map(|node| node.clone()))
	}

	fn append(&mut self, pos: NodeIndex, elems: Vec<TestBridgedMmrNode>) -> mmr_lib::Result<()> {
		// nodes are stored at consecutive positions, starting at `pos`
		self.nodes
			.extend(elems.into_iter().enumerate().map(|(i, elem)| (pos + i as NodeIndex, elem)));
		Ok(())
	}
}
impl ChainBuilder {
	/// Creates new chain builder with given validator set size.
	pub fn new(initial_validators_count: u32) -> Self {
		let storage = BridgedMmrStorage { nodes: HashMap::new() };
		Self {
			headers: Vec::new(),
			validator_set_id: 0,
			validator_keys: validator_pairs(0, initial_validators_count),
			mmr: mmr_lib::MMR::new(0, storage),
		}
	}

	/// Get header with given number.
	pub fn header(&self, number: TestBridgedBlockNumber) -> HeaderAndCommitment {
		// the builder starts with header#1, so header#N lives at index N - 1
		let index = number as usize - 1;
		self.headers[index].clone()
	}

	/// Returns single built header.
	pub fn to_header(&self) -> HeaderAndCommitment {
		assert_eq!(self.headers.len(), 1);
		self.header(1)
	}

	/// Returns built chain.
	pub fn to_chain(&self) -> Vec<HeaderAndCommitment> {
		self.headers.clone()
	}

	/// Appends header, that has been finalized by BEEFY (so it has a linked signed commitment).
	pub fn append_finalized_header(self) -> Self {
		// validator set is not changed by a regular finalized header
		let (set_id, keys) = (self.validator_set_id, self.validator_keys.clone());
		HeaderBuilder::with_chain(self, set_id, keys).finalize()
	}

	/// Append multiple finalized headers at once.
	pub fn append_finalized_headers(self, count: usize) -> Self {
		(0..count).fold(self, |chain, _| chain.append_finalized_header())
	}

	/// Appends header, that enacts new validator set.
	///
	/// Such headers are explicitly finalized by BEEFY.
	pub fn append_handoff_header(self, next_validators_len: u32) -> Self {
		let handoff_set_id = self.validator_set_id + 1;
		// pick a random (but overflow-safe) starting index for the new validator keys
		let first_validator_index = rand::thread_rng().gen::<u32>() % (u32::MAX / 2);
		let handoff_keys = validator_pairs(first_validator_index, next_validators_len);
		HeaderBuilder::with_chain(self, handoff_set_id, handoff_keys).finalize()
	}

	/// Append several default header without commitment.
	pub fn append_default_headers(self, count: usize) -> Self {
		(0..count).fold(self, |chain, _| {
			let (set_id, keys) = (chain.validator_set_id, chain.validator_keys.clone());
			HeaderBuilder::with_chain(chain, set_id, keys).build()
		})
	}
}
/// Custom header builder.
pub struct HeaderBuilder {
	/// Chain that the header is being appended to.
	chain: ChainBuilder,
	/// The header under construction.
	header: TestBridgedHeader,
	/// MMR leaf generated for this header.
	leaf: TestBridgedRawMmrLeaf,
	/// Proof of the leaf; generated lazily by `customize_proof`/`build`.
	leaf_proof: Option<TestBridgedMmrProof>,
	/// Id of the validator set that becomes active after this header.
	next_validator_set_id: ValidatorSetId,
	/// Keys of the validators that become active after this header.
	next_validator_keys: Vec<BeefyPair>,
}
impl HeaderBuilder {
	/// Start building a header on top of `chain`, announcing the given next validator set.
	fn with_chain(
		chain: ChainBuilder,
		next_validator_set_id: ValidatorSetId,
		next_validator_keys: Vec<BeefyPair>,
	) -> Self {
		// we're starting with header#1, since header#0 is always finalized
		let header_number = chain.headers.len() as TestBridgedBlockNumber + 1;
		let header = TestBridgedHeader::new(
			header_number,
			Default::default(),
			Default::default(),
			// parent hash is the hash of the previously built header (or default for header#1)
			chain.headers.last().map(|h| h.header.hash()).unwrap_or_default(),
			Default::default(),
		);

		// the MMR leaf commits to the *next* authority set, as per the BEEFY handoff scheme
		let next_validators =
			next_validator_keys.iter().map(|pair| pair.public()).collect::<Vec<_>>();
		let next_validators_mmr_root =
			get_authorities_mmr_root::<TestRuntime, (), _>(next_validators.iter());
		let leaf = sp_consensus_beefy::mmr::MmrLeaf {
			version: MmrLeafVersion::new(1, 0),
			parent_number_and_hash: (header.number().saturating_sub(1), *header.parent_hash()),
			beefy_next_authority_set: BeefyNextAuthoritySet {
				id: next_validator_set_id,
				len: next_validators.len() as u32,
				keyset_commitment: next_validators_mmr_root,
			},
			leaf_extra: (),
		};

		HeaderBuilder {
			chain,
			header,
			leaf,
			leaf_proof: None,
			next_validator_keys,
			next_validator_set_id,
		}
	}

	/// Customize generated proof of header MMR leaf.
	///
	/// Can only be called once.
	pub fn customize_proof(
		mut self,
		f: impl FnOnce(TestBridgedMmrProof) -> TestBridgedMmrProof,
	) -> Self {
		assert!(self.leaf_proof.is_none());

		// push the leaf into the MMR and generate an inclusion proof for it
		let leaf_hash = TestBridgedMmrHashing::hash(&self.leaf.encode());
		let node = TestBridgedMmrNode::Hash(leaf_hash);
		let leaf_position = self.chain.mmr.push(node).unwrap();
		let proof = self.chain.mmr.gen_proof(vec![leaf_position]).unwrap();

		// genesis has no leaf => leaf index is header number minus 1
		let leaf_index = *self.header.number() - 1;
		let leaf_count = *self.header.number();
		// run the generated proof through the (possibly identity) customization closure
		self.leaf_proof = Some(f(TestBridgedMmrProof {
			leaf_indices: vec![leaf_index],
			leaf_count,
			items: proof.proof_items().iter().map(|i| i.hash()).collect(),
		}));

		self
	}

	/// Build header without commitment.
	pub fn build(mut self) -> ChainBuilder {
		// generate the default (uncustomized) leaf proof, unless it was customized already
		if self.leaf_proof.is_none() {
			self = self.customize_proof(|proof| proof);
		}

		let validators =
			self.chain.validator_keys.iter().map(|pair| pair.public()).collect::<Vec<_>>();
		self.chain.headers.push(HeaderAndCommitment {
			header: self.header,
			commitment: None,
			validator_set: TestBridgedValidatorSet::new(validators, self.chain.validator_set_id)
				.unwrap(),
			leaf: self.leaf,
			leaf_proof: self.leaf_proof.expect("guaranteed by the customize_proof call above; qed"),
			mmr_root: self.chain.mmr.get_root().unwrap().hash(),
		});

		// enact the next validator set for all following headers
		self.chain.validator_set_id = self.next_validator_set_id;
		self.chain.validator_keys = self.next_validator_keys;

		self.chain
	}

	/// Build header with commitment.
	pub fn finalize(self) -> ChainBuilder {
		// remember the set that must sign the commitment: it is the set active *before*
		// `build` enacts the next set
		let validator_count = self.chain.validator_keys.len();
		let current_validator_set_id = self.chain.validator_set_id;
		let current_validator_set_keys = self.chain.validator_keys.clone();
		let mut chain = self.build();

		let last_header = chain.headers.last_mut().expect("added by append_header; qed");
		last_header.commitment = Some(sign_commitment(
			Commitment {
				// commitment payload carries the current MMR root
				payload: BeefyPayload::from_single_entry(
					MMR_ROOT_PAYLOAD_ID,
					chain.mmr.get_root().unwrap().hash().encode(),
				),
				block_number: *last_header.header.number(),
				validator_set_id: current_validator_set_id,
			},
			&current_validator_set_keys,
			// sign with `2/3 + 1` of validators (BFT supermajority)
			validator_count * 2 / 3 + 1,
		));

		chain
	}
}
/// Default Merging & Hashing behavior for MMR.
pub struct BridgedMmrHashMerge;

impl mmr_lib::Merge for BridgedMmrHashMerge {
	type Item = TestBridgedMmrNode;

	fn merge(left: &Self::Item, right: &Self::Item) -> Self::Item {
		// the merged node is the hash of the two child hashes, concatenated left-to-right
		let left_hash = left.hash();
		let right_hash = right.hash();
		let mut concat = Vec::with_capacity(
			left_hash.as_ref().len() + right_hash.as_ref().len(),
		);
		concat.extend_from_slice(left_hash.as_ref());
		concat.extend_from_slice(right_hash.as_ref());
		TestBridgedMmrNode::Hash(TestBridgedMmrHashing::hash(&concat))
	}
}
use crate::{
BridgedBeefyAuthorityId, BridgedBeefyAuthoritySet, BridgedBeefyAuthoritySetInfo,
BridgedBeefyMmrLeaf, BridgedBeefySignedCommitment, BridgedChain, BridgedMmrHash,
BridgedMmrHashing, BridgedMmrProof, Config, Error, LOG_TARGET,
};
use bp_beefy::{merkle_root, verify_mmr_leaves_proof, BeefyAuthorityId, MmrDataOrHash};
use codec::Encode;
use frame_support::ensure;
use sp_runtime::traits::{Convert, Hash};
use sp_std::{vec, vec::Vec};
/// MMR data-or-hash node type, used when verifying MMR leaf proofs.
type BridgedMmrDataOrHash<T, I> = MmrDataOrHash<BridgedMmrHashing<T, I>, BridgedBeefyMmrLeaf<T, I>>;
/// A way to encode validator id to the BEEFY merkle tree leaf.
type BridgedBeefyAuthorityIdToMerkleLeaf<T, I> =
	bp_beefy::BeefyAuthorityIdToMerkleLeafOf<BridgedChain<T, I>>;
/// Get the MMR root for a collection of validators.
pub(crate) fn get_authorities_mmr_root<
	'a,
	T: Config<I>,
	I: 'static,
	V: Iterator<Item = &'a BridgedBeefyAuthorityId<T, I>>,
>(
	authorities: V,
) -> BridgedMmrHash<T, I> {
	// convert every authority id into its merkle tree leaf representation...
	let merkle_leafs: Vec<_> = authorities
		.map(|authority| BridgedBeefyAuthorityIdToMerkleLeaf::<T, I>::convert(authority.clone()))
		.collect();
	// ...and compute the root of the resulting merkle tree
	merkle_root::<BridgedMmrHashing<T, I>, _>(merkle_leafs)
}
fn verify_authority_set<T: Config<I>, I: 'static>(
authority_set_info: &BridgedBeefyAuthoritySetInfo<T, I>,
authority_set: &BridgedBeefyAuthoritySet<T, I>,
) -> Result<(), Error<T, I>> {
ensure!(authority_set.id() == authority_set_info.id, Error::<T, I>::InvalidValidatorSetId);
ensure!(
authority_set.len() == authority_set_info.len as usize,
Error::<T, I>::InvalidValidatorSetLen
);
// Ensure that the authority set that signed the commitment is the expected one.
let root = get_authorities_mmr_root::<T, I, _>(authority_set.validators().iter());
ensure!(root == authority_set_info.keyset_commitment, Error::<T, I>::InvalidValidatorSetRoot);
Ok(())
}
/// Number of correct signatures, required from given validators set to accept signed
/// commitment.
///
/// We're using 'conservative' approach here, where signatures of `2/3+1` validators are
/// required.
pub(crate) fn signatures_required(validators_len: usize) -> usize {
	// at most `(validators_len - 1) / 3` validators may be faulty, so we require
	// signatures from everyone else
	let max_faulty_validators = validators_len.saturating_sub(1) / 3;
	validators_len - max_faulty_validators
}
/// Verify that the commitment carries enough correct signatures from the given authority set.
fn verify_signatures<T: Config<I>, I: 'static>(
	commitment: &BridgedBeefySignedCommitment<T, I>,
	authority_set: &BridgedBeefyAuthoritySet<T, I>,
) -> Result<(), Error<T, I>> {
	// there must be exactly one (possibly empty) signature slot per authority
	ensure!(
		commitment.signatures.len() == authority_set.len(),
		Error::<T, I>::InvalidCommitmentSignaturesLen
	);

	// Ensure that the commitment was signed by enough authorities.
	let msg = commitment.commitment.encode();
	let mut missing_signatures = signatures_required(authority_set.len());
	for (idx, (authority, maybe_sig)) in
		authority_set.validators().iter().zip(commitment.signatures.iter()).enumerate()
	{
		if let Some(sig) = maybe_sig {
			if authority.verify(sig, &msg) {
				missing_signatures = missing_signatures.saturating_sub(1);
				// stop as soon as we have enough valid signatures - remaining slots are
				// not verified
				if missing_signatures == 0 {
					break
				}
			} else {
				// an incorrect signature is not fatal by itself - we only require
				// `signatures_required` correct ones in total
				log::debug!(
					target: LOG_TARGET,
					"Signed commitment contains incorrect signature of validator {} ({:?}): {:?}",
					idx,
					authority,
					sig,
				);
			}
		}
	}
	ensure!(missing_signatures == 0, Error::<T, I>::NotEnoughCorrectSignatures);

	Ok(())
}
/// Extract MMR root from commitment payload.
///
/// Returns `MmrRootMissingFromCommitment` both when the MMR root entry is absent from the
/// payload and when it fails to decode into the bridged MMR hash type (see the
/// `submit_commitment_extracts_mmr_root` test).
fn extract_mmr_root<T: Config<I>, I: 'static>(
	commitment: &BridgedBeefySignedCommitment<T, I>,
) -> Result<BridgedMmrHash<T, I>, Error<T, I>> {
	commitment
		.commitment
		.payload
		.get_decoded(&bp_beefy::MMR_ROOT_PAYLOAD_ID)
		.ok_or(Error::MmrRootMissingFromCommitment)
}
pub(crate) fn verify_commitment<T: Config<I>, I: 'static>(
commitment: &BridgedBeefySignedCommitment<T, I>,
authority_set_info: &BridgedBeefyAuthoritySetInfo<T, I>,
authority_set: &BridgedBeefyAuthoritySet<T, I>,
) -> Result<BridgedMmrHash<T, I>, Error<T, I>> {
// Ensure that the commitment is signed by the best known BEEFY validator set.
ensure!(
commitment.commitment.validator_set_id == authority_set_info.id,
Error::<T, I>::InvalidCommitmentValidatorSetId
);
ensure!(
commitment.signatures.len() == authority_set_info.len as usize,
Error::<T, I>::InvalidCommitmentSignaturesLen
);
verify_authority_set(authority_set_info, authority_set)?;
verify_signatures(commitment, authority_set)?;
extract_mmr_root(commitment)
}
/// Verify MMR proof of given leaf.
pub(crate) fn verify_beefy_mmr_leaf<T: Config<I>, I: 'static>(
	mmr_leaf: &BridgedBeefyMmrLeaf<T, I>,
	mmr_proof: BridgedMmrProof<T, I>,
	mmr_root: BridgedMmrHash<T, I>,
) -> Result<(), Error<T, I>> {
	// remember proof metrics for logging - `verify_mmr_leaves_proof` consumes the proof
	let mmr_proof_leaf_count = mmr_proof.leaf_count;
	let mmr_proof_length = mmr_proof.items.len();

	// Verify the mmr proof for the provided leaf.
	let mmr_leaf_hash = BridgedMmrHashing::<T, I>::hash(&mmr_leaf.encode());
	verify_mmr_leaves_proof(
		mmr_root,
		vec![BridgedMmrDataOrHash::<T, I>::Hash(mmr_leaf_hash)],
		mmr_proof,
	)
	.map_err(|e| {
		// log details of the failed proof before collapsing the error into a pallet error
		log::error!(
			target: LOG_TARGET,
			"MMR proof of leaf {:?} (root: {:?}, leaf count: {}, len: {}) \
			verification has failed with error: {:?}",
			mmr_leaf_hash,
			mmr_root,
			mmr_proof_leaf_count,
			mmr_proof_length,
			e,
		);

		Error::<T, I>::MmrProofVerificationFailed
	})
}
#[cfg(test)]
mod tests {
	// Tests for commitment/MMR-proof verification, driven through the pallet's
	// `import_commitment` entry point against chains built by `ChainBuilder`.
	use super::*;
	use crate::{mock::*, mock_chain::*, *};
	use bp_beefy::{BeefyPayload, MMR_ROOT_PAYLOAD_ID};
	use frame_support::{assert_noop, assert_ok};
	use sp_consensus_beefy::ValidatorSet;

	// Checks the metadata (validator set id, signature slot count) of the commitment.
	#[test]
	fn submit_commitment_checks_metadata() {
		run_test_with_initialize(8, || {
			// Fails if `commitment.commitment.validator_set_id` differs.
			let mut header = ChainBuilder::new(8).append_finalized_header().to_header();
			header.customize_commitment(
				|commitment| {
					commitment.validator_set_id += 1;
				},
				&validator_pairs(0, 8),
				6,
			);
			assert_noop!(
				import_commitment(header),
				Error::<TestRuntime, ()>::InvalidCommitmentValidatorSetId,
			);

			// Fails if `commitment.signatures.len()` differs.
			let mut header = ChainBuilder::new(8).append_finalized_header().to_header();
			header.customize_signatures(|signatures| {
				signatures.pop();
			});
			assert_noop!(
				import_commitment(header),
				Error::<TestRuntime, ()>::InvalidCommitmentSignaturesLen,
			);
		});
	}

	// Checks that the submitted validator set matches the stored authority set info.
	#[test]
	fn submit_commitment_checks_validator_set() {
		run_test_with_initialize(8, || {
			// Fails if `ValidatorSet::id` differs.
			let mut header = ChainBuilder::new(8).append_finalized_header().to_header();
			header.validator_set = ValidatorSet::new(validator_ids(0, 8), 1).unwrap();
			assert_noop!(
				import_commitment(header),
				Error::<TestRuntime, ()>::InvalidValidatorSetId,
			);

			// Fails if `ValidatorSet::len()` differs.
			let mut header = ChainBuilder::new(8).append_finalized_header().to_header();
			header.validator_set = ValidatorSet::new(validator_ids(0, 5), 0).unwrap();
			assert_noop!(
				import_commitment(header),
				Error::<TestRuntime, ()>::InvalidValidatorSetLen,
			);

			// Fails if the validators differ.
			let mut header = ChainBuilder::new(8).append_finalized_header().to_header();
			header.validator_set = ValidatorSet::new(validator_ids(3, 8), 0).unwrap();
			assert_noop!(
				import_commitment(header),
				Error::<TestRuntime, ()>::InvalidValidatorSetRoot,
			);
		});
	}

	// Checks that enough *correct* signatures are required (see `signatures_required`).
	#[test]
	fn submit_commitment_checks_signatures() {
		run_test_with_initialize(20, || {
			// Fails when there aren't enough signatures.
			let mut header = ChainBuilder::new(20).append_finalized_header().to_header();
			header.customize_signatures(|signatures| {
				let first_signature_idx = signatures.iter().position(Option::is_some).unwrap();
				signatures[first_signature_idx] = None;
			});
			assert_noop!(
				import_commitment(header),
				Error::<TestRuntime, ()>::NotEnoughCorrectSignatures,
			);

			// Fails when there aren't enough correct signatures.
			let mut header = ChainBuilder::new(20).append_finalized_header().to_header();
			header.customize_signatures(|signatures| {
				// replace the first signature with the last one => it becomes incorrect
				// for the first validator
				let first_signature_idx = signatures.iter().position(Option::is_some).unwrap();
				let last_signature_idx = signatures.len() -
					signatures.iter().rev().position(Option::is_some).unwrap() -
					1;
				signatures[first_signature_idx] = signatures[last_signature_idx].clone();
			});
			assert_noop!(
				import_commitment(header),
				Error::<TestRuntime, ()>::NotEnoughCorrectSignatures,
			);

			// Returns Ok(()) when there are enough signatures, even if some are incorrect.
			let mut header = ChainBuilder::new(20).append_finalized_header().to_header();
			header.customize_signatures(|signatures| {
				// fill an empty slot with a signature of a different validator - it is
				// incorrect, but enough correct ones remain
				let first_signature_idx = signatures.iter().position(Option::is_some).unwrap();
				let first_missing_signature_idx =
					signatures.iter().position(Option::is_none).unwrap();
				signatures[first_missing_signature_idx] = signatures[first_signature_idx].clone();
			});
			assert_ok!(import_commitment(header));
		});
	}

	// Checks the MMR leaf proof verification (`verify_beefy_mmr_leaf`).
	#[test]
	fn submit_commitment_checks_mmr_proof() {
		run_test_with_initialize(1, || {
			let validators = validator_pairs(0, 1);

			// Fails if leaf is not for parent.
			let mut header = ChainBuilder::new(1).append_finalized_header().to_header();
			header.leaf.parent_number_and_hash.0 += 1;
			assert_noop!(
				import_commitment(header),
				Error::<TestRuntime, ()>::MmrProofVerificationFailed,
			);

			// Fails if mmr proof is incorrect.
			let mut header = ChainBuilder::new(1).append_finalized_header().to_header();
			header.leaf_proof.leaf_indices[0] += 1;
			assert_noop!(
				import_commitment(header),
				Error::<TestRuntime, ()>::MmrProofVerificationFailed,
			);

			// Fails if mmr root is incorrect.
			let mut header = ChainBuilder::new(1).append_finalized_header().to_header();
			// Replace MMR root with zeroes.
			header.customize_commitment(
				|commitment| {
					commitment.payload =
						BeefyPayload::from_single_entry(MMR_ROOT_PAYLOAD_ID, [0u8; 32].encode());
				},
				&validators,
				1,
			);
			assert_noop!(
				import_commitment(header),
				Error::<TestRuntime, ()>::MmrProofVerificationFailed,
			);
		});
	}

	// Checks MMR root extraction from the commitment payload (`extract_mmr_root`).
	#[test]
	fn submit_commitment_extracts_mmr_root() {
		run_test_with_initialize(1, || {
			let validators = validator_pairs(0, 1);

			// Fails if there is no mmr root in the payload.
			let mut header = ChainBuilder::new(1).append_finalized_header().to_header();
			// Remove MMR root from the payload.
			header.customize_commitment(
				|commitment| {
					commitment.payload = BeefyPayload::from_single_entry(*b"xy", vec![]);
				},
				&validators,
				1,
			);
			assert_noop!(
				import_commitment(header),
				Error::<TestRuntime, ()>::MmrRootMissingFromCommitment,
			);

			// Fails if mmr root can't be decoded.
			let mut header = ChainBuilder::new(1).append_finalized_header().to_header();
			// MMR root is a 32-byte array and we have replaced it with single byte
			header.customize_commitment(
				|commitment| {
					commitment.payload =
						BeefyPayload::from_single_entry(MMR_ROOT_PAYLOAD_ID, vec![42]);
				},
				&validators,
				1,
			);
			assert_noop!(
				import_commitment(header),
				Error::<TestRuntime, ()>::MmrRootMissingFromCommitment,
			);
		});
	}

	// Happy path: a valid handoff commitment updates all storage items.
	#[test]
	fn submit_commitment_stores_valid_data() {
		run_test_with_initialize(20, || {
			let header = ChainBuilder::new(20).append_handoff_header(30).to_header();
			assert_ok!(import_commitment(header.clone()));

			assert_eq!(ImportedCommitmentsInfo::<TestRuntime>::get().unwrap().best_block_number, 1);
			assert_eq!(CurrentAuthoritySetInfo::<TestRuntime>::get().id, 1);
			assert_eq!(CurrentAuthoritySetInfo::<TestRuntime>::get().len, 30);
			assert_eq!(
				ImportedCommitments::<TestRuntime>::get(1).unwrap(),
				bp_beefy::ImportedCommitment {
					parent_number_and_hash: (0, [0; 32].into()),
					mmr_root: header.mmr_root,
				},
			);
		});
	}
}
[package]
name = "pallet-bridge-grandpa"
version = "0.7.0"
description = "Module implementing GRANDPA on-chain light client used for bridging consensus of substrate-based chains."
authors.workspace = true
edition.workspace = true
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
[lints]
workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false }
finality-grandpa = { version = "0.16.2", default-features = false }
log = { workspace = true }
scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
# Bridge Dependencies
bp-runtime = { path = "../../primitives/runtime", default-features = false }
bp-header-chain = { path = "../../primitives/header-chain", default-features = false }
# Substrate Dependencies
frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false, features = ["serde"] }
sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false, features = ["serde"] }
sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-trie = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
# Optional Benchmarking Dependencies
bp-test-utils = { path = "../../primitives/test-utils", default-features = false, optional = true }
frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false, optional = true }
[dev-dependencies]
sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" }
sp-io = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" }
[features]
default = ["std"]
std = [
"bp-header-chain/std",
"bp-runtime/std",
"bp-test-utils/std",
"codec/std",
"finality-grandpa/std",
"frame-benchmarking/std",
"frame-support/std",
"frame-system/std",
"log/std",
"scale-info/std",
"sp-consensus-grandpa/std",
"sp-runtime/std",
"sp-std/std",
"sp-trie/std",
]
runtime-benchmarks = [
"bp-test-utils",
"frame-benchmarking/runtime-benchmarks",
"frame-support/runtime-benchmarks",
"frame-system/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
]
try-runtime = [
"frame-support/try-runtime",
"frame-system/try-runtime",
"sp-runtime/try-runtime",
]
# Bridge GRANDPA Pallet
The bridge GRANDPA pallet is a light client for the GRANDPA finality gadget, running at the bridged chain.
It may import headers and their GRANDPA finality proofs (justifications) of the bridged chain. Imported
headers then may be used to verify storage proofs by other pallets. This makes the bridge GRANDPA pallet
a basic pallet of all bridges with Substrate-based chains. It is used by all bridge types (bridge between
standalone chains, between parachains and any combination of those) and is used by other bridge pallets.
It is used by the parachains light client (bridge parachains pallet) and by messages pallet.
## A Brief Introduction into GRANDPA Finality
You can find detailed information on GRANDPA, by exploring its [repository](https://github.com/paritytech/finality-grandpa).
Here is the minimal required GRANDPA information to understand how the pallet works.
Any Substrate chain may use different block authorship algorithms (like BABE or Aura) to determine block producers and
generate blocks. This has nothing in common with finality, though - the task of block authorship is to coordinate
blocks generation. Any block may be reverted (if there's a fork) if it is not finalized. The finality solution
for (standalone) Substrate-based chains is the GRANDPA finality gadget. If some block is finalized by the gadget, it
can't be reverted.
In GRANDPA, there are validators, identified by their public keys. They select some generated block and produce
signatures on this block hash. If there are enough (more than `2 / 3 * N`, where `N` is number of validators)
signatures, then the block is considered finalized. The set of signatures for the block is called justification.
Anyone who knows the public keys of validators is able to verify GRANDPA justification and that it is generated
for provided header.
There are two main things in GRANDPA that help building light clients:
- there's no need to import all headers of the bridged chain. Light client may import finalized headers or just
some of the finalized headers that it considers useful. While the validators set stays the same, the client may
import any header that is finalized by this set;
- when validators set changes, the GRANDPA gadget adds next set to the header. So light client doesn't need to
verify storage proofs when this happens - it only needs to look at the header and see if it changes the set.
Once set is changed, all following justifications are generated by the new set. Header that is changing the
set is called "mandatory" in the pallet. As the name says, the light client needs to import all such headers
to be able to operate properly.
## Pallet Operations
The main entrypoint of the pallet is the `submit_finality_proof_ex` call. It has three arguments - the finalized
headers, associated GRANDPA justification and ID of the authority set that has generated this justification. The
call simply verifies the justification using current validators set and checks if header is better than the
previous best header. If both checks are passed, the header (only its useful fields) is inserted into the runtime
storage and may be used by other pallets to verify storage proofs.
The submitter pays regular fee for submitting all headers, except for the mandatory header. Since it is
required for the pallet operations, submitting such header is free. So if you're ok with session-length
lags (meaning that there's exactly 1 mandatory header per session), the cost of pallet calls is zero.
When the pallet sees mandatory header, it updates the validators set with the set from the header. All
following justifications (until next mandatory header) must be generated by this new set.
## Pallet Initialization
As the previous section states, there are two things that are mandatory for pallet operations: best finalized
header and the current validators set. Without it the pallet can't import any headers. But how to provide
initial values for these fields? There are two options.
First option, while it is easier, doesn't work in all cases. It is to start chain with initial header and
validators set specified in the chain specification. This won't work, however, if we want to add bridge
to already started chain.
For the latter case we have the `initialize` call. It accepts the initial header and initial validators set.
The call may be called by the governance, root or by the pallet owner (if it is set).
## Non-Essential Functionality
There may be a special account in every runtime where the bridge GRANDPA module is deployed. This
account, named 'module owner', is like a module-level sudo account - he's able to halt and
resume all module operations without requiring runtime upgrade. Calls that are related to this
account are:
- `fn set_owner()`: current module owner may call it to transfer "ownership" to another account;
- `fn set_operating_mode()`: the module owner (or sudo account) may call this function to stop all
module operations. After this call, all finality proofs will be rejected until a further `set_operating_mode` call.
This call may be used when something extraordinary happens with the bridge;
- `fn initialize()`: module owner may call this function to initialize the bridge.
If pallet owner is not defined, the governance may be used to make those calls.
## Signed Extension to Reject Obsolete Headers
It'd be better for anyone (for chain and for submitters) to reject all transactions that are submitting
already known headers to the pallet. This way, we leave block space to other useful transactions and
we don't charge concurrent submitters for their honest actions.
To deal with that, we have a [signed extension](./src/call_ext) that may be added to the runtime.
It does exactly what is required - rejects all transactions with already known headers. The submitter
pays nothing for such transactions - they're simply removed from the transaction pool, when the block
is built.
You may also take a look at the [`generate_bridge_reject_obsolete_headers_and_messages`](../../bin/runtime-common/src/lib.rs)
macro that bundles several similar signed extensions in a single one.
## GRANDPA Finality Relay
We have an offchain actor, who is watching for GRANDPA justifications and submits them to the bridged chain.
It is the finality relay - you may look at the [crate level documentation and the code](../../relays/finality/).
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Benchmarks for the GRANDPA Pallet.
//!
//! The main dispatchable for the GRANDPA pallet is `submit_finality_proof_ex`. Our benchmarks
//! are based around `submit_finality_proof`, though - from weight PoV they are the same calls.
//! There are two main factors which affect finality proof verification:
//!
//! 1. The number of `votes-ancestries` in the justification
//! 2. The number of `pre-commits` in the justification
//!
//! Vote ancestries are the headers between (`finality_target`, `head_of_chain`], where
//! `head_of_chain` is a descendant of `finality_target`.
//!
//! Pre-commits are messages which are signed by validators at the head of the chain they think is
//! the best.
//!
//! Consider the following:
//!
//! / B <- C'
//! A <- B <- C
//!
//! The common ancestor of both forks is block A, so this is what GRANDPA will finalize. In order to
//! verify this we will have vote ancestries of `[B, C, B', C']` and pre-commits `[C, C']`.
//!
//! Note that the worst case scenario here would be a justification where each validator has its
//! own fork which is `SESSION_LENGTH` blocks long.
use crate::*;
use bp_header_chain::justification::required_justification_precommits;
use bp_runtime::BasicOperatingMode;
use bp_test_utils::{
accounts, make_justification_for_header, JustificationGeneratorParams, TEST_GRANDPA_ROUND,
TEST_GRANDPA_SET_ID,
};
use frame_benchmarking::{benchmarks_instance_pallet, whitelisted_caller};
use frame_system::RawOrigin;
use sp_consensus_grandpa::AuthorityId;
use sp_runtime::traits::{One, Zero};
use sp_std::vec::Vec;
/// The maximum number of vote ancestries to include in a justification.
///
/// In practice this would be limited by the session length (number of blocks a single authority set
/// can produce) of a given chain.
const MAX_VOTE_ANCESTRIES: u32 = 1000;

// `1..MAX_VOTE_ANCESTRIES` is too large && benchmarks are running for almost 40m (steps=50,
// repeat=20) on a decent laptop, which is too much. Since we're building linear function here,
// let's just select some limited subrange for benchmarking.
const MAX_VOTE_ANCESTRIES_RANGE_BEGIN: u32 = MAX_VOTE_ANCESTRIES / 20;
// range end is twice the range begin, i.e. we benchmark `[5%, 10%]` of `MAX_VOTE_ANCESTRIES`
const MAX_VOTE_ANCESTRIES_RANGE_END: u32 =
	MAX_VOTE_ANCESTRIES_RANGE_BEGIN + MAX_VOTE_ANCESTRIES_RANGE_BEGIN;
// the same with validators - if there are too much validators, let's run benchmarks on subrange
/// Upper bound of the benchmarked precommits range.
fn precommits_range_end<T: Config<I>, I: 'static>() -> u32 {
	let max_bridged_authorities = T::BridgedChain::MAX_AUTHORITIES_COUNT;
	// With large authority sets, benchmark only a limited subrange - otherwise benchmarks take
	// far too long. We're fitting a linear function, so a subrange is enough.
	//
	// NOTE: previously the clamped value was computed but never bound (the `if` expression
	// result was discarded), so the clamp had no effect; rebind it via shadowing.
	let max_bridged_authorities = if max_bridged_authorities > 128 {
		sp_std::cmp::max(128, max_bridged_authorities / 5)
	} else {
		max_bridged_authorities
	};
	required_justification_precommits(max_bridged_authorities)
}
/// Prepare header and its justification to submit using `submit_finality_proof`.
///
/// `precommits` is the number of authorities signing the justification and `ancestors` is the
/// number of vote-ancestry headers in it. Initializes the pallet with a genesis header before
/// returning header#1 and its justification.
fn prepare_benchmark_data<T: Config<I>, I: 'static>(
	precommits: u32,
	ancestors: u32,
) -> (BridgedHeader<T, I>, GrandpaJustification<BridgedHeader<T, I>>) {
	// going from precommits to total authorities count
	let total_authorities_count = (3 * precommits - 1) / 2;

	let authority_list = accounts(total_authorities_count as u16)
		.iter()
		.map(|id| (AuthorityId::from(*id), 1))
		.collect::<Vec<_>>();

	// initialize the pallet with header#0 and the authority list above
	let genesis_header: BridgedHeader<T, I> = bp_test_utils::test_header(Zero::zero());
	let genesis_hash = genesis_header.hash();
	let init_data = InitializationData {
		header: Box::new(genesis_header),
		authority_list,
		set_id: TEST_GRANDPA_SET_ID,
		operating_mode: BasicOperatingMode::Normal,
	};

	bootstrap_bridge::<T, I>(init_data);
	assert!(<ImportedHeaders<T, I>>::contains_key(genesis_hash));

	// generate a justification for header#1, signed by `precommits` authorities and
	// carrying `ancestors` vote-ancestry headers in a single fork
	let header: BridgedHeader<T, I> = bp_test_utils::test_header(One::one());
	let params = JustificationGeneratorParams {
		header: header.clone(),
		round: TEST_GRANDPA_ROUND,
		set_id: TEST_GRANDPA_SET_ID,
		authorities: accounts(precommits as u16).iter().map(|k| (*k, 1)).collect::<Vec<_>>(),
		ancestors,
		forks: 1,
	};
	let justification = make_justification_for_header(params);
	(header, justification)
}
benchmarks_instance_pallet! {
	// This is the "gold standard" benchmark for this extrinsic, and it's what should be used to
	// annotate the weight in the pallet.
	submit_finality_proof {
		// `p` - number of precommits in the justification
		let p in 1 .. precommits_range_end::<T, I>();
		// `v` - number of vote ancestries in the justification
		let v in MAX_VOTE_ANCESTRIES_RANGE_BEGIN..MAX_VOTE_ANCESTRIES_RANGE_END;
		let caller: T::AccountId = whitelisted_caller();
		let (header, justification) = prepare_benchmark_data::<T, I>(p, v);
	}: submit_finality_proof(RawOrigin::Signed(caller), Box::new(header), justification)
	verify {
		let genesis_header: BridgedHeader<T, I> = bp_test_utils::test_header(Zero::zero());
		let header: BridgedHeader<T, I> = bp_test_utils::test_header(One::one());
		let expected_hash = header.hash();

		// check that the header#1 has been inserted
		assert_eq!(<BestFinalized<T, I>>::get().unwrap().1, expected_hash);
		assert!(<ImportedHeaders<T, I>>::contains_key(expected_hash));

		// check that the header#0 has been pruned
		assert!(!<ImportedHeaders<T, I>>::contains_key(genesis_header.hash()));
	}

	impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::TestRuntime)
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
use crate::{
weights::WeightInfo, BestFinalized, BridgedBlockNumber, BridgedHeader, Config,
CurrentAuthoritySet, Error, FreeHeadersRemaining, Pallet,
};
use bp_header_chain::{
justification::GrandpaJustification, max_expected_submit_finality_proof_arguments_size,
ChainWithGrandpa, GrandpaConsensusLogReader,
};
use bp_runtime::{BlockNumberOf, Chain, OwnedBridgeModule};
use codec::Encode;
use frame_support::{
dispatch::CallableCallFor,
traits::{Get, IsSubType},
weights::Weight,
};
use sp_consensus_grandpa::SetId;
use sp_runtime::{
traits::{CheckedSub, Header, Zero},
transaction_validity::{InvalidTransaction, TransactionValidityError},
RuntimeDebug, SaturatedConversion,
};
/// Info about a `submit_finality_proof` call which tries to finalize a single bridged header.
///
/// NOTE(review): the previous doc comment referenced `SubmitParachainHeads`/parachains - that
/// was a copy-paste from the parachains pallet; this type describes GRANDPA finality proofs.
#[derive(Copy, Clone, PartialEq, RuntimeDebug)]
pub struct SubmitFinalityProofInfo<N> {
	/// Number of the finality target.
	pub block_number: N,
	/// An identifier of the validators set that has signed the submitted justification.
	/// It might be `None` if deprecated version of the `submit_finality_proof` is used.
	pub current_set_id: Option<SetId>,
	/// If `true`, then the call proves new **mandatory** header.
	pub is_mandatory: bool,
	/// If `true`, then the call must be free (assuming that everything else is valid) to
	/// be treated as valid.
	pub is_free_execution_expected: bool,
	/// Extra weight that we assume is included in the call.
	///
	/// We have some assumptions about headers and justifications of the bridged chain.
	/// We know that if our assumptions are correct, then the call must not have the
	/// weight above some limit. The fee paid for weight above that limit, is never refunded.
	pub extra_weight: Weight,
	/// Extra size (in bytes) that we assume are included in the call.
	///
	/// We have some assumptions about headers and justifications of the bridged chain.
	/// We know that if our assumptions are correct, then the call must not have the
	/// weight above some limit. The fee paid for bytes above that limit, is never refunded.
	pub extra_size: u32,
}
/// Verified `SubmitFinalityProofInfo<N>`.
///
/// Produced by `SubmitFinalityProofHelper::check_obsolete_from_extension` once the basic
/// checks on the raw call info have passed.
#[derive(Copy, Clone, PartialEq, RuntimeDebug)]
pub struct VerifiedSubmitFinalityProofInfo<N> {
	/// Base call information.
	pub base: SubmitFinalityProofInfo<N>,
	/// A difference between bundled bridged header and best bridged header known to us
	/// before the call.
	pub improved_by: N,
}
impl<N> SubmitFinalityProofInfo<N> {
	/// Tells whether the call fits our size/weight estimations for regular calls.
	///
	/// Returns `true` when neither extra weight, nor extra size has been detected.
	pub fn fits_limits(&self) -> bool {
		let no_extra_weight = self.extra_weight.is_zero();
		let no_extra_size = self.extra_size.is_zero();
		no_extra_weight && no_extra_size
	}
}
/// Helper struct that provides methods for working with the `SubmitFinalityProof` call.
pub struct SubmitFinalityProofHelper<T: Config<I>, I: 'static> {
	// carries the `T`/`I` generics only; never instantiated with data
	_phantom_data: sp_std::marker::PhantomData<(T, I)>,
}
impl<T: Config<I>, I: 'static> SubmitFinalityProofHelper<T, I> {
	/// Returns `true` if we may fit more free headers into the current block. If `false` is
	/// returned, the call will be paid even if `is_free_execution_expected` has been set
	/// to `true`.
	pub fn can_import_anything_for_free() -> bool {
		// `unwrap_or(u32::MAX)` means that if `FreeHeadersRemaining` is `None`, we may accept
		// this header for free. That is a small cheat - it is `None` if executed outside of
		// transaction (e.g. during block initialization). Normal relayer would never submit
		// such calls, but if he did, that is not our problem. During normal transactions,
		// the `FreeHeadersRemaining` is always `Some(_)`.
		let free_headers_remaining = FreeHeadersRemaining::<T, I>::get().unwrap_or(u32::MAX);
		if free_headers_remaining == 0 {
			return false
		}

		true
	}

	/// Check that the: (1) GRANDPA head provided by the `SubmitFinalityProof` is better than the
	/// best one we know (2) if `current_set_id` matches the current authority set id, if specified
	/// and (3) whether transaction MAY be free for the submitter if `is_free_execution_expected`
	/// is `true`.
	///
	/// Returns number of headers between the current best finalized header, known to the pallet
	/// and the bundled header.
	pub fn check_obsolete_from_extension(
		call_info: &SubmitFinalityProofInfo<BlockNumberOf<T::BridgedChain>>,
	) -> Result<BlockNumberOf<T::BridgedChain>, Error<T, I>> {
		// do basic checks first
		let improved_by = Self::check_obsolete(call_info.block_number, call_info.current_set_id)?;

		// if submitter has NOT specified that it wants free execution, then we are done
		if !call_info.is_free_execution_expected {
			return Ok(improved_by);
		}

		// else - if we can not accept more free headers, "reject" the transaction
		if !Self::can_import_anything_for_free() {
			log::trace!(
				target: crate::LOG_TARGET,
				"Cannot accept free {:?} header {:?}. No more free slots remaining",
				T::BridgedChain::ID,
				call_info.block_number,
			);

			return Err(Error::<T, I>::FreeHeadersLimitExceded);
		}

		// ensure that the `improved_by` is larger than the configured free interval
		// (mandatory headers are exempt from the interval check)
		if !call_info.is_mandatory {
			if let Some(free_headers_interval) = T::FreeHeadersInterval::get() {
				if improved_by < free_headers_interval.into() {
					log::trace!(
						target: crate::LOG_TARGET,
						"Cannot accept free {:?} header {:?}. Too small difference \
						between submitted headers: {:?} vs {}",
						T::BridgedChain::ID,
						call_info.block_number,
						improved_by,
						free_headers_interval,
					);

					return Err(Error::<T, I>::BelowFreeHeaderInterval);
				}
			}
		}

		// we do not check whether the header matches free submission criteria here - it is the
		// relayer responsibility to check that
		Ok(improved_by)
	}

	/// Check that the GRANDPA head provided by the `SubmitFinalityProof` is better than the best
	/// one we know. Additionally, checks if `current_set_id` matches the current authority set
	/// id, if specified. This method is called by the call code and the transaction extension,
	/// so it does not check the free execution.
	///
	/// Returns number of headers between the current best finalized header, known to the pallet
	/// and the bundled header.
	pub fn check_obsolete(
		finality_target: BlockNumberOf<T::BridgedChain>,
		current_set_id: Option<SetId>,
	) -> Result<BlockNumberOf<T::BridgedChain>, Error<T, I>> {
		let best_finalized = BestFinalized::<T, I>::get().ok_or_else(|| {
			log::trace!(
				target: crate::LOG_TARGET,
				"Cannot finalize header {:?} because pallet is not yet initialized",
				finality_target,
			);
			<Error<T, I>>::NotInitialized
		})?;

		// reject both older and equal headers - only a strict improvement is accepted
		let improved_by = match finality_target.checked_sub(&best_finalized.number()) {
			Some(improved_by) if improved_by > Zero::zero() => improved_by,
			_ => {
				log::trace!(
					target: crate::LOG_TARGET,
					"Cannot finalize obsolete header: bundled {:?}, best {:?}",
					finality_target,
					best_finalized,
				);

				return Err(Error::<T, I>::OldHeader)
			},
		};

		if let Some(current_set_id) = current_set_id {
			let actual_set_id = <CurrentAuthoritySet<T, I>>::get().set_id;
			if current_set_id != actual_set_id {
				log::trace!(
					target: crate::LOG_TARGET,
					"Cannot finalize header signed by unknown authority set: bundled {:?}, best {:?}",
					current_set_id,
					actual_set_id,
				);

				return Err(Error::<T, I>::InvalidAuthoritySetId)
			}
		}

		Ok(improved_by)
	}

	/// Check if the `SubmitFinalityProof` was successfully executed.
	pub fn was_successful(finality_target: BlockNumberOf<T::BridgedChain>) -> bool {
		match BestFinalized::<T, I>::get() {
			Some(best_finalized) => best_finalized.number() == finality_target,
			None => false,
		}
	}
}
/// Trait representing a call that is a sub type of this pallet's call.
pub trait CallSubType<T: Config<I, RuntimeCall = Self>, I: 'static>:
	IsSubType<CallableCallFor<Pallet<T, I>, T>>
{
	/// Extract finality proof info from a runtime call.
	///
	/// Returns `None` if the call is neither `submit_finality_proof` nor
	/// `submit_finality_proof_ex` of this pallet.
	fn submit_finality_proof_info(
		&self,
	) -> Option<SubmitFinalityProofInfo<BridgedBlockNumber<T, I>>> {
		if let Some(crate::Call::<T, I>::submit_finality_proof { finality_target, justification }) =
			self.is_sub_type()
		{
			// deprecated call version: no set id to check, free execution can't be requested
			return Some(submit_finality_proof_info_from_args::<T, I>(
				finality_target,
				justification,
				None,
				false,
			))
		} else if let Some(crate::Call::<T, I>::submit_finality_proof_ex {
			finality_target,
			justification,
			current_set_id,
			is_free_execution_expected,
		}) = self.is_sub_type()
		{
			return Some(submit_finality_proof_info_from_args::<T, I>(
				finality_target,
				justification,
				Some(*current_set_id),
				*is_free_execution_expected,
			))
		}

		None
	}

	/// Validate Grandpa headers in order to avoid "mining" transactions that provide outdated
	/// bridged chain headers. Without this validation, even honest relayers may lose their funds
	/// if there are multiple relays running and submitting the same information.
	///
	/// Returns `Ok(None)` if the call is not the `submit_finality_proof` call of our pallet.
	/// Returns `Ok(Some(_))` if the call is the `submit_finality_proof` call of our pallet and
	/// we believe the call brings header that improves the pallet state.
	/// Returns `Err(_)` if the call is the `submit_finality_proof` call of our pallet and we
	/// believe that the call will fail.
	fn check_obsolete_submit_finality_proof(
		&self,
	) -> Result<
		Option<VerifiedSubmitFinalityProofInfo<BridgedBlockNumber<T, I>>>,
		TransactionValidityError,
	>
	where
		Self: Sized,
	{
		let call_info = match self.submit_finality_proof_info() {
			Some(finality_proof) => finality_proof,
			_ => return Ok(None),
		};

		// reject the call early if the pallet is halted
		if Pallet::<T, I>::ensure_not_halted().is_err() {
			return Err(InvalidTransaction::Call.into())
		}

		let result = SubmitFinalityProofHelper::<T, I>::check_obsolete_from_extension(&call_info);
		match result {
			Ok(improved_by) =>
				Ok(Some(VerifiedSubmitFinalityProofInfo { base: call_info, improved_by })),
			// obsolete headers are `Stale`, everything else is a plain invalid call
			Err(Error::<T, I>::OldHeader) => Err(InvalidTransaction::Stale.into()),
			Err(_) => Err(InvalidTransaction::Call.into()),
		}
	}
}
// Blanket implementation: every runtime call that can be downcast to this pallet's call
// automatically gets the `CallSubType` helpers (all methods have default bodies).
impl<T: Config<I>, I: 'static> CallSubType<T, I> for T::RuntimeCall where
	T::RuntimeCall: IsSubType<CallableCallFor<Pallet<T, I>, T>>
{
}
/// Extract finality proof info from the submitted header and justification.
pub(crate) fn submit_finality_proof_info_from_args<T: Config<I>, I: 'static>(
	finality_target: &BridgedHeader<T, I>,
	justification: &GrandpaJustification<BridgedHeader<T, I>>,
	current_set_id: Option<SetId>,
	is_free_execution_expected: bool,
) -> SubmitFinalityProofInfo<BridgedBlockNumber<T, I>> {
	// The `submit_finality_proof` call will reject justifications with invalid, duplicate,
	// unknown and extra signatures, as well as justifications with less than necessary
	// signatures. So we do not care about extra weight because of additional signatures here.
	let signed_precommits: u32 = justification.commit.precommits.len().saturated_into();

	// We do care about extra weight because of more-than-expected headers in the votes
	// ancestries. But we have problems computing extra weight for additional headers (weight of
	// additional header is too small, so that our benchmarks aren't detecting that). So if there
	// are more than expected headers in votes ancestries, we will treat the whole call weight
	// as an extra weight.
	let headers_in_ancestry: u32 = justification.votes_ancestries.len().saturated_into();
	let extra_weight =
		if headers_in_ancestry > T::BridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY {
			T::WeightInfo::submit_finality_proof(signed_precommits, headers_in_ancestry)
		} else {
			Weight::zero()
		};

	// Check if the `finality_target` is a mandatory header (one that schedules an authority set
	// change). If so, we are ready to refund larger size.
	let is_mandatory =
		GrandpaConsensusLogReader::<BridgedBlockNumber<T, I>>::find_scheduled_change(
			finality_target.digest(),
		)
		.is_some();

	// We can estimate extra call size easily, without any additional significant overhead:
	// compare the actual encoded size of the arguments with the maximal expected one.
	let actual_call_size: u32 = finality_target
		.encoded_size()
		.saturating_add(justification.encoded_size())
		.saturated_into();
	let expected_call_size = max_expected_submit_finality_proof_arguments_size::<T::BridgedChain>(
		is_mandatory,
		signed_precommits,
	);

	SubmitFinalityProofInfo {
		block_number: *finality_target.number(),
		current_set_id,
		is_mandatory,
		is_free_execution_expected,
		extra_weight,
		extra_size: actual_call_size.saturating_sub(expected_call_size),
	}
}
#[cfg(test)]
mod tests {
	//! Unit tests for the `submit_finality_proof` call info extraction and the
	//! obsolete-header/free-execution transaction extension checks.

	use crate::{
		call_ext::CallSubType,
		mock::{
			run_test, test_header, FreeHeadersInterval, RuntimeCall, TestBridgedChain, TestNumber,
			TestRuntime,
		},
		BestFinalized, Config, CurrentAuthoritySet, FreeHeadersRemaining, PalletOperatingMode,
		StoredAuthoritySet, SubmitFinalityProofInfo, WeightInfo,
	};
	use bp_header_chain::ChainWithGrandpa;
	use bp_runtime::{BasicOperatingMode, HeaderId};
	use bp_test_utils::{
		make_default_justification, make_justification_for_header, JustificationGeneratorParams,
		TEST_GRANDPA_SET_ID,
	};
	use codec::Encode;
	use frame_support::weights::Weight;
	use sp_runtime::{testing::DigestItem, traits::Header as _, SaturatedConversion};

	// Returns `true` if a paid `submit_finality_proof_ex` call for header `num` passes the
	// `check_obsolete_submit_finality_proof` extension check.
	fn validate_block_submit(num: TestNumber) -> bool {
		let bridge_grandpa_call = crate::Call::<TestRuntime, ()>::submit_finality_proof_ex {
			finality_target: Box::new(test_header(num)),
			justification: make_default_justification(&test_header(num)),
			// not initialized => zero
			current_set_id: 0,
			is_free_execution_expected: false,
		};
		RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa(
			bridge_grandpa_call,
		))
		.is_ok()
	}

	// Emulates the pallet state where header#10 is the best finalized bridged header.
	fn sync_to_header_10() {
		let header10_hash = sp_core::H256::default();
		BestFinalized::<TestRuntime, ()>::put(HeaderId(10, header10_hash));
	}

	#[test]
	fn extension_rejects_obsolete_header() {
		run_test(|| {
			// when current best finalized is #10 and we're trying to import header#5 => tx is
			// rejected
			sync_to_header_10();
			assert!(!validate_block_submit(5));
		});
	}

	#[test]
	fn extension_rejects_same_header() {
		run_test(|| {
			// when current best finalized is #10 and we're trying to import header#10 => tx is
			// rejected
			sync_to_header_10();
			assert!(!validate_block_submit(10));
		});
	}

	#[test]
	fn extension_rejects_new_header_if_pallet_is_halted() {
		run_test(|| {
			// when pallet is halted => tx is rejected
			sync_to_header_10();
			PalletOperatingMode::<TestRuntime, ()>::put(BasicOperatingMode::Halted);

			assert!(!validate_block_submit(15));
		});
	}

	#[test]
	fn extension_rejects_new_header_if_set_id_is_invalid() {
		run_test(|| {
			// when set id is different from the passed one => tx is rejected
			sync_to_header_10();
			let next_set = StoredAuthoritySet::<TestRuntime, ()>::try_new(vec![], 0x42).unwrap();
			CurrentAuthoritySet::<TestRuntime, ()>::put(next_set);

			assert!(!validate_block_submit(15));
		});
	}

	#[test]
	fn extension_rejects_new_header_if_free_execution_is_requested_and_free_submissions_are_not_accepted(
	) {
		run_test(|| {
			let bridge_grandpa_call = crate::Call::<TestRuntime, ()>::submit_finality_proof_ex {
				finality_target: Box::new(test_header(10 + FreeHeadersInterval::get() as u64)),
				justification: make_default_justification(&test_header(
					10 + FreeHeadersInterval::get() as u64,
				)),
				current_set_id: 0,
				is_free_execution_expected: true,
			};
			sync_to_header_10();

			// when we can accept free headers => Ok
			FreeHeadersRemaining::<TestRuntime, ()>::put(2);
			assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa(
				bridge_grandpa_call.clone(),
			),)
			.is_ok());

			// when we can NOT accept free headers => Err
			FreeHeadersRemaining::<TestRuntime, ()>::put(0);
			assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa(
				bridge_grandpa_call.clone(),
			),)
			.is_err());

			// when called outside of transaction => Ok
			// (`FreeHeadersRemaining` is `None` in that case - see `can_import_anything_for_free`)
			FreeHeadersRemaining::<TestRuntime, ()>::kill();
			assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa(
				bridge_grandpa_call,
			),)
			.is_ok());
		})
	}

	#[test]
	fn extension_rejects_new_header_if_free_execution_is_requested_and_improved_by_is_below_expected(
	) {
		run_test(|| {
			let bridge_grandpa_call = crate::Call::<TestRuntime, ()>::submit_finality_proof_ex {
				finality_target: Box::new(test_header(100)),
				justification: make_default_justification(&test_header(100)),
				current_set_id: 0,
				is_free_execution_expected: true,
			};
			sync_to_header_10();

			// when `improved_by` is less than the free interval
			BestFinalized::<TestRuntime, ()>::put(HeaderId(
				100 - FreeHeadersInterval::get() as u64 + 1,
				sp_core::H256::default(),
			));
			assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa(
				bridge_grandpa_call.clone(),
			),)
			.is_err());

			// when `improved_by` is equal to the free interval
			BestFinalized::<TestRuntime, ()>::put(HeaderId(
				100 - FreeHeadersInterval::get() as u64,
				sp_core::H256::default(),
			));
			assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa(
				bridge_grandpa_call.clone(),
			),)
			.is_ok());

			// when `improved_by` is larger than the free interval
			BestFinalized::<TestRuntime, ()>::put(HeaderId(
				100 - FreeHeadersInterval::get() as u64 - 1,
				sp_core::H256::default(),
			));
			assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa(
				bridge_grandpa_call.clone(),
			),)
			.is_ok());

			// when `improved_by` is less than the free interval BUT it is a mandatory header
			// (mandatory headers are exempt from the interval check)
			let mut mandatory_header = test_header(100);
			let consensus_log = sp_consensus_grandpa::ConsensusLog::<TestNumber>::ScheduledChange(
				sp_consensus_grandpa::ScheduledChange {
					next_authorities: bp_test_utils::authority_list(),
					delay: 0,
				},
			);
			mandatory_header.digest = sp_runtime::Digest {
				logs: vec![DigestItem::Consensus(
					sp_consensus_grandpa::GRANDPA_ENGINE_ID,
					consensus_log.encode(),
				)],
			};
			let justification = make_justification_for_header(JustificationGeneratorParams {
				header: mandatory_header.clone(),
				set_id: 1,
				..Default::default()
			});
			let bridge_grandpa_call = crate::Call::<TestRuntime, ()>::submit_finality_proof_ex {
				finality_target: Box::new(mandatory_header),
				justification,
				current_set_id: 0,
				is_free_execution_expected: true,
			};
			BestFinalized::<TestRuntime, ()>::put(HeaderId(
				100 - FreeHeadersInterval::get() as u64 + 1,
				sp_core::H256::default(),
			));
			assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa(
				bridge_grandpa_call.clone(),
			),)
			.is_ok());
		})
	}

	#[test]
	fn extension_accepts_new_header() {
		run_test(|| {
			// when current best finalized is #10 and we're trying to import header#15 => tx is
			// accepted
			sync_to_header_10();
			assert!(validate_block_submit(15));
		});
	}

	#[test]
	fn submit_finality_proof_info_is_parsed() {
		// when `submit_finality_proof` is used, `current_set_id` is set to `None`
		let deprecated_call =
			RuntimeCall::Grandpa(crate::Call::<TestRuntime, ()>::submit_finality_proof {
				finality_target: Box::new(test_header(42)),
				justification: make_default_justification(&test_header(42)),
			});
		assert_eq!(
			deprecated_call.submit_finality_proof_info(),
			Some(SubmitFinalityProofInfo {
				block_number: 42,
				current_set_id: None,
				extra_weight: Weight::zero(),
				extra_size: 0,
				is_mandatory: false,
				is_free_execution_expected: false,
			})
		);

		// when `submit_finality_proof_ex` is used, `current_set_id` is set to `Some`
		let deprecated_call =
			RuntimeCall::Grandpa(crate::Call::<TestRuntime, ()>::submit_finality_proof_ex {
				finality_target: Box::new(test_header(42)),
				justification: make_default_justification(&test_header(42)),
				current_set_id: 777,
				is_free_execution_expected: false,
			});
		assert_eq!(
			deprecated_call.submit_finality_proof_info(),
			Some(SubmitFinalityProofInfo {
				block_number: 42,
				current_set_id: Some(777),
				extra_weight: Weight::zero(),
				extra_size: 0,
				is_mandatory: false,
				is_free_execution_expected: false,
			})
		);
	}

	#[test]
	fn extension_returns_correct_extra_size_if_call_arguments_are_too_large() {
		// when call arguments are below our limit => no refund
		let small_finality_target = test_header(1);
		let justification_params = JustificationGeneratorParams {
			header: small_finality_target.clone(),
			..Default::default()
		};
		let small_justification = make_justification_for_header(justification_params);
		let small_call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof_ex {
			finality_target: Box::new(small_finality_target),
			justification: small_justification,
			current_set_id: TEST_GRANDPA_SET_ID,
			is_free_execution_expected: false,
		});
		assert_eq!(small_call.submit_finality_proof_info().unwrap().extra_size, 0);

		// when call arguments are too large => partial refund
		// (the oversized digest item inflates the encoded header far beyond the expected size)
		let mut large_finality_target = test_header(1);
		large_finality_target
			.digest_mut()
			.push(DigestItem::Other(vec![42u8; 1024 * 1024]));
		let justification_params = JustificationGeneratorParams {
			header: large_finality_target.clone(),
			..Default::default()
		};
		let large_justification = make_justification_for_header(justification_params);
		let large_call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof_ex {
			finality_target: Box::new(large_finality_target),
			justification: large_justification,
			current_set_id: TEST_GRANDPA_SET_ID,
			is_free_execution_expected: false,
		});
		assert_ne!(large_call.submit_finality_proof_info().unwrap().extra_size, 0);
	}

	#[test]
	fn extension_returns_correct_extra_weight_if_there_are_too_many_headers_in_votes_ancestry() {
		let finality_target = test_header(1);
		let mut justification_params = JustificationGeneratorParams {
			header: finality_target.clone(),
			ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY,
			..Default::default()
		};

		// when there are `REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY` headers => no refund
		let justification = make_justification_for_header(justification_params.clone());
		let call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof_ex {
			finality_target: Box::new(finality_target.clone()),
			justification,
			current_set_id: TEST_GRANDPA_SET_ID,
			is_free_execution_expected: false,
		});
		assert_eq!(call.submit_finality_proof_info().unwrap().extra_weight, Weight::zero());

		// when there are `REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY + 1` headers => full refund
		justification_params.ancestors += 1;
		let justification = make_justification_for_header(justification_params);
		let call_weight = <TestRuntime as Config>::WeightInfo::submit_finality_proof(
			justification.commit.precommits.len().saturated_into(),
			justification.votes_ancestries.len().saturated_into(),
		);
		let call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof_ex {
			finality_target: Box::new(finality_target),
			justification,
			current_set_id: TEST_GRANDPA_SET_ID,
			is_free_execution_expected: false,
		});
		assert_eq!(call.submit_finality_proof_info().unwrap().extra_weight, call_weight);
	}

	#[test]
	fn check_obsolete_submit_finality_proof_returns_correct_improved_by() {
		run_test(|| {
			fn make_call(number: u64) -> RuntimeCall {
				RuntimeCall::Grandpa(crate::Call::<TestRuntime, ()>::submit_finality_proof_ex {
					finality_target: Box::new(test_header(number)),
					justification: make_default_justification(&test_header(number)),
					current_set_id: 0,
					is_free_execution_expected: false,
				})
			}

			sync_to_header_10();

			// when the difference between headers is 1
			assert_eq!(
				RuntimeCall::check_obsolete_submit_finality_proof(&make_call(11))
					.unwrap()
					.unwrap()
					.improved_by,
				1,
			);

			// when the difference between headers is 2
			assert_eq!(
				RuntimeCall::check_obsolete_submit_finality_proof(&make_call(12))
					.unwrap()
					.unwrap()
					.improved_by,
				2,
			);
		})
	}

	#[test]
	fn check_obsolete_submit_finality_proof_ignores_other_calls() {
		run_test(|| {
			let call =
				RuntimeCall::System(frame_system::Call::<TestRuntime>::remark { remark: vec![42] });

			assert_eq!(RuntimeCall::check_obsolete_submit_finality_proof(&call), Ok(None));
		})
	}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Substrate GRANDPA Pallet
//!
//! This pallet is an on-chain GRANDPA light client for Substrate based chains.
//!
//! This pallet achieves this by trustlessly verifying GRANDPA finality proofs on-chain. Once
//! verified, finalized headers are stored in the pallet, thereby creating a sparse header chain.
//! This sparse header chain can be used as a source of truth for other higher-level applications.
//!
//! The pallet is responsible for tracking GRANDPA validator set hand-offs. We only import headers
//! with justifications signed by the current validator set we know of. The header is inspected for
//! a `ScheduledChanges` digest item, which is then used to update to the next validator set.
//!
//! Since this pallet only tracks finalized headers it does not deal with forks. Forks can only
//! occur if the GRANDPA validator set on the bridged chain is either colluding or there is a severe
//! bug resulting in an equivocation. Such events are outside the scope of this pallet.
//! Should a fork occur on the bridged chain, governance intervention will be required to
//! re-initialize the bridge and track the right fork.
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
pub use storage_types::StoredAuthoritySet;
use bp_header_chain::{
justification::GrandpaJustification, AuthoritySet, ChainWithGrandpa, GrandpaConsensusLogReader,
HeaderChain, InitializationData, StoredHeaderData, StoredHeaderDataBuilder,
StoredHeaderGrandpaInfo,
};
use bp_runtime::{BlockNumberOf, HashOf, HasherOf, HeaderId, HeaderOf, OwnedBridgeModule};
use frame_support::{dispatch::PostDispatchInfo, ensure, DefaultNoBound};
use sp_consensus_grandpa::SetId;
use sp_runtime::{
traits::{Header as HeaderT, Zero},
SaturatedConversion,
};
use sp_std::{boxed::Box, convert::TryInto, prelude::*};
mod call_ext;
#[cfg(test)]
mod mock;
mod storage_types;
/// Module, containing weights for this pallet.
pub mod weights;
pub mod weights_ext;
#[cfg(feature = "runtime-benchmarks")]
pub mod benchmarking;
// Re-export in crate namespace for `construct_runtime!`
pub use call_ext::*;
pub use pallet::*;
pub use weights::WeightInfo;
pub use weights_ext::WeightInfoExt;
/// The target that will be used when publishing logs related to this pallet.
pub const LOG_TARGET: &str = "runtime::bridge-grandpa";

/// Bridged chain from the pallet configuration.
pub type BridgedChain<T, I> = <T as Config<I>>::BridgedChain;
/// Block number of the bridged chain.
pub type BridgedBlockNumber<T, I> = BlockNumberOf<<T as Config<I>>::BridgedChain>;
/// Block hash of the bridged chain.
pub type BridgedBlockHash<T, I> = HashOf<<T as Config<I>>::BridgedChain>;
/// Block id (number + hash) of the bridged chain.
pub type BridgedBlockId<T, I> = HeaderId<BridgedBlockHash<T, I>, BridgedBlockNumber<T, I>>;
/// Hasher of the bridged chain.
pub type BridgedBlockHasher<T, I> = HasherOf<<T as Config<I>>::BridgedChain>;
/// Header of the bridged chain.
pub type BridgedHeader<T, I> = HeaderOf<<T as Config<I>>::BridgedChain>;
/// Header data of the bridged chain that is stored at this chain by this pallet.
pub type BridgedStoredHeaderData<T, I> =
	StoredHeaderData<BridgedBlockNumber<T, I>, BridgedBlockHash<T, I>>;
#[frame_support::pallet]
pub mod pallet {
use super::*;
use bp_runtime::BasicOperatingMode;
use frame_support::pallet_prelude::*;
use frame_system::pallet_prelude::*;
#[pallet::config]
pub trait Config<I: 'static = ()>: frame_system::Config {
	/// The overarching event type.
	type RuntimeEvent: From<Event<Self, I>>
		+ IsType<<Self as frame_system::Config>::RuntimeEvent>;
	/// The chain we are bridging to here.
	type BridgedChain: ChainWithGrandpa;
	/// Maximal number of "free" header transactions per block.
	///
	/// To be able to track the bridged chain, the pallet requires all headers that are
	/// changing GRANDPA authorities set at the bridged chain (we call them mandatory).
	/// So it is a common good deed to submit mandatory headers to the pallet.
	///
	/// The pallet may be configured (see [`Self::FreeHeadersInterval`]) to import some
	/// non-mandatory headers for free as well. It also may be treated as a common good
	/// deed, because it may help to reduce bridge fees - this cost may be deducted from
	/// bridge fees, paid by message senders.
	///
	/// However, if the bridged chain gets compromised, its validators may generate as many
	/// "free" headers as they want. And they may fill the whole block (at this chain) for
	/// free. This constant limits the number of calls that we may refund in a single block.
	/// All calls above this limit are accepted, but are not refunded.
	#[pallet::constant]
	type MaxFreeHeadersPerBlock: Get<u32>;
	/// The distance between bridged chain headers, that may be submitted for free. The
	/// first free header is header number zero, the next one is header number
	/// `FreeHeadersInterval::get()` or any of its descendants if that header has not
	/// been submitted. In other words, interval between free headers should be at least
	/// `FreeHeadersInterval`.
	#[pallet::constant]
	type FreeHeadersInterval: Get<Option<u32>>;
	/// Maximal number of finalized headers to keep in the storage.
	///
	/// The setting is there to prevent growing the on-chain state indefinitely. Note
	/// the setting does not relate to block numbers - we will simply keep as much items
	/// in the storage, so it doesn't guarantee any fixed timeframe for finality headers.
	///
	/// Incautious change of this constant may lead to orphan entries in the runtime storage.
	#[pallet::constant]
	type HeadersToKeep: Get<u32>;
	/// Weights gathered through benchmarking.
	type WeightInfo: WeightInfoExt;
}
/// The pallet struct - the on-chain GRANDPA light client of the bridged chain.
#[pallet::pallet]
pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);
#[pallet::hooks]
impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {
	fn on_initialize(_n: BlockNumberFor<T>) -> Weight {
		// refill the free header submissions budget at the start of every block
		FreeHeadersRemaining::<T, I>::put(T::MaxFreeHeadersPerBlock::get());
		Weight::zero()
	}

	fn on_finalize(_n: BlockNumberFor<T>) {
		// remove the entry, so that it is `None` (rather than `Some(_)`) outside of
		// block execution - see `SubmitFinalityProofHelper::can_import_anything_for_free`
		FreeHeadersRemaining::<T, I>::kill();
	}
}
// Wires the generic owned-bridge-module machinery (owner account + operating mode)
// to this pallet's storage items.
impl<T: Config<I>, I: 'static> OwnedBridgeModule<T> for Pallet<T, I> {
	const LOG_TARGET: &'static str = LOG_TARGET;
	type OwnerStorage = PalletOwner<T, I>;
	type OperatingMode = BasicOperatingMode;
	type OperatingModeStorage = PalletOperatingMode<T, I>;
}
#[pallet::call]
impl<T: Config<I>, I: 'static> Pallet<T, I> {
/// This call is deprecated and will be removed around May 2024. Use the
/// `submit_finality_proof_ex` instead. Semantically, this call is an equivalent of the
/// `submit_finality_proof_ex` call without current authority set id check.
#[pallet::call_index(0)]
#[pallet::weight(T::WeightInfo::submit_finality_proof_weight(
	justification.commit.precommits.len().saturated_into(),
	justification.votes_ancestries.len().saturated_into(),
))]
#[allow(deprecated)]
#[deprecated(
	note = "`submit_finality_proof` will be removed in May 2024. Use `submit_finality_proof_ex` instead."
)]
pub fn submit_finality_proof(
	origin: OriginFor<T>,
	finality_target: Box<BridgedHeader<T, I>>,
	justification: GrandpaJustification<BridgedHeader<T, I>>,
) -> DispatchResultWithPostInfo {
	// simply forward to the `_ex` version, filling the set id from our own storage
	Self::submit_finality_proof_ex(
		origin,
		finality_target,
		justification,
		// the `submit_finality_proof_ex` also reads this value, but it is done from the
		// cache, so we don't treat it as an additional db access
		<CurrentAuthoritySet<T, I>>::get().set_id,
		// cannot enforce free execution using this call
		false,
	)
}
/// Bootstrap the bridge pallet with an initial header and authority set from which to sync.
///
/// The initial configuration provided does not need to be the genesis header of the bridged
/// chain, it can be any arbitrary header. You can also provide the next scheduled set
/// change if it is already know.
///
/// This function is only allowed to be called from a trusted origin and writes to storage
/// with practically no checks in terms of the validity of the data. It is important that
/// you ensure that valid data is being passed in.
#[pallet::call_index(1)]
#[pallet::weight((T::DbWeight::get().reads_writes(2, 5), DispatchClass::Operational))]
pub fn initialize(
origin: OriginFor<T>,
init_data: super::InitializationData<BridgedHeader<T, I>>,
) -> DispatchResultWithPostInfo {
Self::ensure_owner_or_root(origin)?;
let init_allowed = !<BestFinalized<T, I>>::exists();
ensure!(init_allowed, <Error<T, I>>::AlreadyInitialized);
initialize_bridge::<T, I>(init_data.clone())?;
log::info!(
target: LOG_TARGET,
"Pallet has been initialized with the following parameters: {:?}",
init_data
);
Ok(().into())
}
/// Change `PalletOwner`.
///
/// May only be called either by root, or by `PalletOwner`.
#[pallet::call_index(2)]
#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
pub fn set_owner(origin: OriginFor<T>, new_owner: Option<T::AccountId>) -> DispatchResult {
<Self as OwnedBridgeModule<_>>::set_owner(origin, new_owner)
}
/// Halt or resume all pallet operations.
///
/// May only be called either by root, or by `PalletOwner`.
#[pallet::call_index(3)]
#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
pub fn set_operating_mode(
origin: OriginFor<T>,
operating_mode: BasicOperatingMode,
) -> DispatchResult {
<Self as OwnedBridgeModule<_>>::set_operating_mode(origin, operating_mode)
}
/// Verify a target header is finalized according to the given finality proof. The proof
/// is assumed to be signed by GRANDPA authorities set with `current_set_id` id.
///
/// It will use the underlying storage pallet to fetch information about the current
/// authorities and best finalized header in order to verify that the header is finalized.
///
/// If successful in verification, it will write the target header to the underlying storage
/// pallet.
///
/// The call fails if:
///
/// - the pallet is halted;
///
/// - the pallet knows better header than the `finality_target`;
///
/// - the id of best GRANDPA authority set, known to the pallet is not equal to the
/// `current_set_id`;
///
/// - verification is not optimized or invalid;
///
/// - header contains forced authorities set change or change with non-zero delay.
///
/// The `is_free_execution_expected` parameter is not really used inside the call. It is
/// used by the transaction extension, which should be registered at the runtime level. If
/// this parameter is `true`, the transaction will be treated as invalid, if the call won't
/// be executed for free. If transaction extension is not used by the runtime, this
/// parameter is not used at all.
#[pallet::call_index(4)]
#[pallet::weight(T::WeightInfo::submit_finality_proof_weight(
justification.commit.precommits.len().saturated_into(),
justification.votes_ancestries.len().saturated_into(),
))]
pub fn submit_finality_proof_ex(
origin: OriginFor<T>,
finality_target: Box<BridgedHeader<T, I>>,
justification: GrandpaJustification<BridgedHeader<T, I>>,
current_set_id: sp_consensus_grandpa::SetId,
_is_free_execution_expected: bool,
) -> DispatchResultWithPostInfo {
Self::ensure_not_halted().map_err(Error::<T, I>::BridgeModule)?;
ensure_signed(origin)?;
let (hash, number) = (finality_target.hash(), *finality_target.number());
log::trace!(
target: LOG_TARGET,
"Going to try and finalize header {:?}",
finality_target
);
// it checks whether the `number` is better than the current best block number
// and whether the `current_set_id` matches the best known set id
let improved_by =
SubmitFinalityProofHelper::<T, I>::check_obsolete(number, Some(current_set_id))?;
let authority_set = <CurrentAuthoritySet<T, I>>::get();
let unused_proof_size = authority_set.unused_proof_size();
let set_id = authority_set.set_id;
let authority_set: AuthoritySet = authority_set.into();
verify_justification::<T, I>(&justification, hash, number, authority_set)?;
let maybe_new_authority_set =
try_enact_authority_change::<T, I>(&finality_target, set_id)?;
let may_refund_call_fee = may_refund_call_fee::<T, I>(
maybe_new_authority_set.is_some(),
&finality_target,
&justification,
current_set_id,
improved_by,
);
if may_refund_call_fee {
on_free_header_imported::<T, I>();
}
insert_header::<T, I>(*finality_target, hash);
// mandatory header is a header that changes authorities set. The pallet can't go
// further without importing this header. So every bridge MUST import mandatory headers.
//
// We don't want to charge extra costs for mandatory operations. So relayer is not
// paying fee for mandatory headers import transactions.
//
// If size/weight of the call is exceeds our estimated limits, the relayer still needs
// to pay for the transaction.
let pays_fee = if may_refund_call_fee { Pays::No } else { Pays::Yes };
log::info!(
target: LOG_TARGET,
"Successfully imported finalized header with hash {:?}! Free: {}",
hash,
if may_refund_call_fee { "Yes" } else { "No" },
);
// the proof size component of the call weight assumes that there are
// `MaxBridgedAuthorities` in the `CurrentAuthoritySet` (we use `MaxEncodedLen`
// estimation). But if their number is lower, then we may "refund" some `proof_size`,
// making proof smaller and leaving block space to other useful transactions
let pre_dispatch_weight = T::WeightInfo::submit_finality_proof(
justification.commit.precommits.len().saturated_into(),
justification.votes_ancestries.len().saturated_into(),
);
let actual_weight = pre_dispatch_weight
.set_proof_size(pre_dispatch_weight.proof_size().saturating_sub(unused_proof_size));
Self::deposit_event(Event::UpdatedBestFinalizedHeader {
number,
hash,
grandpa_info: StoredHeaderGrandpaInfo {
finality_proof: justification,
new_verification_context: maybe_new_authority_set,
},
});
Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee })
}
}
/// Number of free header submissions that we may yet accept in the current block.
///
/// If the `FreeHeadersRemaining` hits zero, all following mandatory headers in the
/// current block are accepted with fee (`Pays::Yes` is returned).
///
/// The `FreeHeadersRemaining` is an ephemeral value that is set to
/// `MaxFreeHeadersPerBlock` at each block initialization and is killed on block
/// finalization. So it never ends up in the storage trie.
#[pallet::storage]
#[pallet::whitelist_storage]
#[pallet::getter(fn free_mandatory_headers_remaining)]
pub type FreeHeadersRemaining<T: Config<I>, I: 'static = ()> =
	StorageValue<_, u32, OptionQuery>;
/// Hash of the header used to bootstrap the pallet.
#[pallet::storage]
pub(super) type InitialHash<T: Config<I>, I: 'static = ()> =
	StorageValue<_, BridgedBlockHash<T, I>, ValueQuery>;
/// Hash of the best finalized header.
#[pallet::storage]
#[pallet::getter(fn best_finalized)]
pub type BestFinalized<T: Config<I>, I: 'static = ()> =
	StorageValue<_, BridgedBlockId<T, I>, OptionQuery>;
/// A ring buffer of imported hashes. Ordered by the insertion time.
///
/// Bounded by `HeadersToKeep`; `ImportedHashesPointer` points at the next slot to
/// overwrite (see `insert_header`).
#[pallet::storage]
pub(super) type ImportedHashes<T: Config<I>, I: 'static = ()> = StorageMap<
	Hasher = Identity,
	Key = u32,
	Value = BridgedBlockHash<T, I>,
	QueryKind = OptionQuery,
	OnEmpty = GetDefault,
	MaxValues = MaybeHeadersToKeep<T, I>,
>;
/// Current ring buffer position.
#[pallet::storage]
pub(super) type ImportedHashesPointer<T: Config<I>, I: 'static = ()> =
	StorageValue<_, u32, ValueQuery>;
/// Relevant fields of imported headers.
///
/// Keyed by header hash; pruned in lock-step with the `ImportedHashes` ring buffer.
#[pallet::storage]
pub type ImportedHeaders<T: Config<I>, I: 'static = ()> = StorageMap<
	Hasher = Identity,
	Key = BridgedBlockHash<T, I>,
	Value = BridgedStoredHeaderData<T, I>,
	QueryKind = OptionQuery,
	OnEmpty = GetDefault,
	MaxValues = MaybeHeadersToKeep<T, I>,
>;
/// The current GRANDPA Authority set.
#[pallet::storage]
pub type CurrentAuthoritySet<T: Config<I>, I: 'static = ()> =
	StorageValue<_, StoredAuthoritySet<T, I>, ValueQuery>;
/// Optional pallet owner.
///
/// Pallet owner has a right to halt all pallet operations and then resume it. If it is
/// `None`, then there are no direct ways to halt/resume pallet operations, but other
/// runtime methods may still be used to do that (i.e. democracy::referendum to update halt
/// flag directly or call the `halt_operations`).
#[pallet::storage]
pub type PalletOwner<T: Config<I>, I: 'static = ()> =
	StorageValue<_, T::AccountId, OptionQuery>;
/// The current operating mode of the pallet.
///
/// Depending on the mode either all, or no transactions will be allowed.
#[pallet::storage]
pub type PalletOperatingMode<T: Config<I>, I: 'static = ()> =
	StorageValue<_, BasicOperatingMode, ValueQuery>;
/// Genesis configuration of the pallet. Both fields are optional: an empty config
/// leaves the pallet halted until `initialize` is called (see `genesis_build` below).
#[pallet::genesis_config]
#[derive(DefaultNoBound)]
pub struct GenesisConfig<T: Config<I>, I: 'static = ()> {
	/// Optional module owner account.
	pub owner: Option<T::AccountId>,
	/// Optional module initialization data.
	pub init_data: Option<super::InitializationData<BridgedHeader<T, I>>>,
}
#[pallet::genesis_build]
impl<T: Config<I>, I: 'static> BuildGenesisConfig for GenesisConfig<T, I> {
	fn build(&self) {
		if let Some(ref owner) = self.owner {
			<PalletOwner<T, I>>::put(owner);
		}
		if let Some(init_data) = self.init_data.clone() {
			// invalid genesis data (e.g. too many authorities) is a chain-spec bug
			initialize_bridge::<T, I>(init_data).expect("genesis config is correct; qed");
		} else {
			// Since the bridge hasn't been initialized we shouldn't allow anyone to perform
			// transactions.
			<PalletOperatingMode<T, I>>::put(BasicOperatingMode::Halted);
		}
	}
}
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event<T: Config<I>, I: 'static = ()> {
	/// Best finalized chain header has been updated to the header with given number and hash.
	UpdatedBestFinalizedHeader {
		/// Number of the new best finalized header.
		number: BridgedBlockNumber<T, I>,
		/// Hash of the new best finalized header.
		hash: BridgedBlockHash<T, I>,
		/// The Grandpa info associated to the new best finalized header.
		grandpa_info: StoredHeaderGrandpaInfo<BridgedHeader<T, I>>,
	},
}
#[pallet::error]
pub enum Error<T, I = ()> {
	/// The given justification is invalid for the given header.
	InvalidJustification,
	/// The authority set from the underlying header chain is invalid.
	InvalidAuthoritySet,
	/// The header being imported is older than the best finalized header known to the pallet.
	OldHeader,
	/// The scheduled authority set change found in the header is unsupported by the pallet.
	///
	/// This is the case for non-standard (e.g forced) authority set changes.
	UnsupportedScheduledChange,
	/// The pallet is not yet initialized.
	NotInitialized,
	/// The pallet has already been initialized.
	AlreadyInitialized,
	/// Too many authorities in the set.
	TooManyAuthoritiesInSet,
	/// Error generated by the `OwnedBridgeModule` trait.
	BridgeModule(bp_runtime::OwnedBridgeModuleError),
	/// The `current_set_id` argument of the `submit_finality_proof_ex` doesn't match
	/// the id of the current set, known to the pallet.
	InvalidAuthoritySetId,
	/// The submitter wanted free execution, but we can't fit more free transactions
	/// to the block.
	// NOTE(review): variant name has a typo ("Exceded" instead of "Exceeded"), but renaming
	// it would change the error name exposed to clients/tooling - left as-is deliberately.
	FreeHeadersLimitExceded,
	/// The submitter wanted free execution, but the difference between best known and
	/// bundled header numbers is below the `FreeHeadersInterval`.
	BelowFreeHeaderInterval,
}
/// Called when new free header is imported: decrements the remaining per-block
/// free-submissions allowance (saturating at zero).
pub fn on_free_header_imported<T: Config<I>, I: 'static>() {
	FreeHeadersRemaining::<T, I>::mutate(|remaining| {
		// Never turn `None` into `Some(..)` here - the signed extension assumes that the
		// value is `None` outside of block execution, i.e. when a transaction is being
		// validated from the transaction pool.
		if let Some(left) = remaining.as_mut() {
			*left = left.saturating_sub(1);
		}
	});
}
/// Return true if we may refund transaction cost to the submitter. In other words,
/// this transaction is considered as common good deed w.r.t to pallet configuration.
fn may_refund_call_fee<T: Config<I>, I: 'static>(
	is_mandatory_header: bool,
	finality_target: &BridgedHeader<T, I>,
	justification: &GrandpaJustification<BridgedHeader<T, I>>,
	current_set_id: SetId,
	improved_by: BridgedBlockNumber<T, I>,
) -> bool {
	// if we have refunded too much at this block => not refunding
	let free_slots_left = FreeHeadersRemaining::<T, I>::get().unwrap_or(0);
	if free_slots_left == 0 {
		return false;
	}
	// if size/weight of call is larger than expected => not refunding
	let call_info = submit_finality_proof_info_from_args::<T, I>(
		&finality_target,
		&justification,
		Some(current_set_id),
		// this function is called from the transaction body and we do not want
		// to do MAY-be-free-executed checks here - they had to be done in the
		// transaction extension before
		false,
	);
	if !call_info.fits_limits() {
		return false;
	}
	// mandatory headers are always refunded; a non-mandatory header is refunded only
	// when the configuration allows free headers and this one improved the best known
	// header by at least the configured interval
	is_mandatory_header ||
		matches!(
			T::FreeHeadersInterval::get(),
			Some(interval) if improved_by >= interval.into()
		)
}
/// Check the given header for a GRANDPA scheduled authority set change. If a change
/// is found it will be enacted immediately.
///
/// This function does not support forced changes, or scheduled changes with delays
/// since these types of changes are indicative of abnormal behavior from GRANDPA.
///
/// Returned value will indicate if a change was enacted or not.
pub(crate) fn try_enact_authority_change<T: Config<I>, I: 'static>(
	header: &BridgedHeader<T, I>,
	current_set_id: sp_consensus_grandpa::SetId,
) -> Result<Option<AuthoritySet>, DispatchError> {
	// We don't support forced changes - at that point governance intervention is required.
	ensure!(
		GrandpaConsensusLogReader::<BridgedBlockNumber<T, I>>::find_forced_change(
			header.digest()
		)
		.is_none(),
		<Error<T, I>>::UnsupportedScheduledChange
	);
	// No scheduled change in the digest => nothing to enact.
	let change = match GrandpaConsensusLogReader::<BridgedBlockNumber<T, I>>::find_scheduled_change(
		header.digest(),
	) {
		Some(change) => change,
		None => return Ok(None),
	};
	// GRANDPA only includes a `delay` for forced changes, so this isn't valid.
	ensure!(change.delay == Zero::zero(), <Error<T, I>>::UnsupportedScheduledChange);
	// TODO [#788]: Stop manually increasing the `set_id` here.
	let next_set_id = current_set_id + 1;
	let next_authorities = StoredAuthoritySet::<T, I> {
		authorities: change
			.next_authorities
			.try_into()
			.map_err(|_| Error::<T, I>::TooManyAuthoritiesInSet)?,
		set_id: next_set_id,
	};
	// Since our header schedules a change and we know the delay is 0, it must also enact
	// the change.
	<CurrentAuthoritySet<T, I>>::put(&next_authorities);
	log::info!(
		target: LOG_TARGET,
		"Transitioned from authority set {} to {}! New authorities are: {:?}",
		current_set_id,
		next_set_id,
		next_authorities,
	);
	Ok(Some(next_authorities.into()))
}
/// Verify a GRANDPA justification (finality proof) for a given header.
///
/// Will use the GRANDPA current authorities known to the pallet.
///
/// If successful it returns the decoded GRANDPA justification so we can refund any weight which
/// was overcharged in the initial call.
pub(crate) fn verify_justification<T: Config<I>, I: 'static>(
justification: &GrandpaJustification<BridgedHeader<T, I>>,
hash: BridgedBlockHash<T, I>,
number: BridgedBlockNumber<T, I>,
authority_set: bp_header_chain::AuthoritySet,
) -> Result<(), sp_runtime::DispatchError> {
use bp_header_chain::justification::verify_justification;
Ok(verify_justification::<BridgedHeader<T, I>>(
(hash, number),
&authority_set.try_into().map_err(|_| <Error<T, I>>::InvalidAuthoritySet)?,
justification,
)
.map_err(|e| {
log::error!(
target: LOG_TARGET,
"Received invalid justification for {:?}: {:?}",
hash,
e,
);
<Error<T, I>>::InvalidJustification
})?)
}
/// Import a previously verified header to the storage.
///
/// Note this function solely takes care of updating the storage and pruning old entries,
/// but does not verify the validity of such import.
pub(crate) fn insert_header<T: Config<I>, I: 'static>(
	header: BridgedHeader<T, I>,
	hash: BridgedBlockHash<T, I>,
) {
	// Remember which hash (if any) currently occupies the ring-buffer slot we are about
	// to overwrite - that's the header to prune. This read must happen before the insert.
	let slot = <ImportedHashesPointer<T, I>>::get();
	let evicted = <ImportedHashes<T, I>>::try_get(slot);
	<BestFinalized<T, I>>::put(HeaderId(*header.number(), hash));
	<ImportedHeaders<T, I>>::insert(hash, header.build());
	<ImportedHashes<T, I>>::insert(slot, hash);
	// Advance the (wrapping) ring-buffer pointer and drop the evicted header, if any.
	<ImportedHashesPointer<T, I>>::put((slot + 1) % T::HeadersToKeep::get());
	if let Ok(evicted_hash) = evicted {
		log::debug!(target: LOG_TARGET, "Pruning old header: {:?}.", evicted_hash);
		<ImportedHeaders<T, I>>::remove(evicted_hash);
	}
}
/// Since this writes to storage with no real checks this should only be used in functions that
/// were called by a trusted origin.
pub(crate) fn initialize_bridge<T: Config<I>, I: 'static>(
	init_params: super::InitializationData<BridgedHeader<T, I>>,
) -> Result<(), Error<T, I>> {
	let super::InitializationData { header, authority_list, set_id, operating_mode } =
		init_params;
	let authority_count = authority_list.len();
	let authority_set = match StoredAuthoritySet::<T, I>::try_new(authority_list, set_id) {
		Ok(authority_set) => authority_set,
		Err(e) => {
			log::error!(
				target: LOG_TARGET,
				"Failed to initialize bridge. Number of authorities in the set {} is larger than the configured value {}",
				authority_count,
				T::BridgedChain::MAX_AUTHORITIES_COUNT,
			);
			return Err(e)
		},
	};
	let initial_hash = header.hash();
	<InitialHash<T, I>>::put(initial_hash);
	// reset the ring buffer so the initial header occupies slot 0
	<ImportedHashesPointer<T, I>>::put(0);
	insert_header::<T, I>(*header, initial_hash);
	<CurrentAuthoritySet<T, I>>::put(authority_set);
	<PalletOperatingMode<T, I>>::put(operating_mode);
	Ok(())
}
/// Adapter for using `Config::HeadersToKeep` as `MaxValues` bound in our storage maps.
pub struct MaybeHeadersToKeep<T, I>(PhantomData<(T, I)>);
// this implementation is required to use the struct as `MaxValues`
impl<T: Config<I>, I: 'static> Get<Option<u32>> for MaybeHeadersToKeep<T, I> {
	fn get() -> Option<u32> {
		// `MaxValues` expects `Option<u32>`; our bound is always known.
		Some(T::HeadersToKeep::get())
	}
}
/// Initialize pallet so that it is ready for inserting new header.
///
/// The function makes sure that the new insertion will cause the pruning of some old header.
///
/// Returns parent header for the new header.
#[cfg(feature = "runtime-benchmarks")]
pub(crate) fn bootstrap_bridge<T: Config<I>, I: 'static>(
	init_params: super::InitializationData<BridgedHeader<T, I>>,
) -> BridgedHeader<T, I> {
	let start_header = init_params.header.clone();
	initialize_bridge::<T, I>(init_params).expect("benchmarks are correct");
	// the most obvious way to cause pruning during next insertion would be to insert
	// `HeadersToKeep` headers. But it'll make our benchmarks slow. So we will just play with
	// our pruning ring-buffer.
	//
	// after initialization the pointer is at slot 1; rewinding it to 0 means the next
	// insertion will overwrite (and thus prune) the initial header's slot
	assert_eq!(ImportedHashesPointer::<T, I>::get(), 1);
	ImportedHashesPointer::<T, I>::put(0);
	*start_header
}
}
impl<T: Config<I>, I: 'static> Pallet<T, I>
where
	<T as frame_system::Config>::RuntimeEvent: TryInto<Event<T, I>>,
{
	/// Get the GRANDPA justifications accepted in the current block.
	pub fn synced_headers_grandpa_info() -> Vec<StoredHeaderGrandpaInfo<BridgedHeader<T, I>>> {
		let mut synced = Vec::new();
		// Scan this block's events for header-import events and collect their GRANDPA info.
		for record in frame_system::Pallet::<T>::read_events_no_consensus() {
			if let Ok(Event::<T, I>::UpdatedBestFinalizedHeader { grandpa_info, .. }) =
				record.event.try_into()
			{
				synced.push(grandpa_info);
			}
		}
		synced
	}
}
/// Bridge GRANDPA pallet as header chain.
pub type GrandpaChainHeaders<T, I> = Pallet<T, I>;
impl<T: Config<I>, I: 'static> HeaderChain<BridgedChain<T, I>> for GrandpaChainHeaders<T, I> {
	fn finalized_header_state_root(
		header_hash: HashOf<BridgedChain<T, I>>,
	) -> Option<HashOf<BridgedChain<T, I>>> {
		// only headers still present in the pruning ring buffer can be looked up
		ImportedHeaders::<T, I>::get(header_hash).map(|h| h.state_root)
	}
}
/// (Re)initialize bridge with given header for using it in `pallet-bridge-messages` benchmarks.
#[cfg(feature = "runtime-benchmarks")]
pub fn initialize_for_benchmarks<T: Config<I>, I: 'static>(header: BridgedHeader<T, I>) {
	initialize_bridge::<T, I>(InitializationData {
		header: Box::new(header),
		authority_list: sp_std::vec::Vec::new(), /* we don't verify any proofs in external
		                                          * benchmarks */
		set_id: 0,
		operating_mode: bp_runtime::BasicOperatingMode::Normal,
	})
	.expect("only used from benchmarks; benchmarks are correct; qed");
}
#[cfg(test)]
mod tests {
use super::*;
use crate::mock::{
run_test, test_header, FreeHeadersInterval, RuntimeEvent as TestEvent, RuntimeOrigin,
System, TestBridgedChain, TestHeader, TestNumber, TestRuntime, MAX_BRIDGED_AUTHORITIES,
};
use bp_header_chain::BridgeGrandpaCall;
use bp_runtime::BasicOperatingMode;
use bp_test_utils::{
authority_list, generate_owned_bridge_module_tests, make_default_justification,
make_justification_for_header, JustificationGeneratorParams, ALICE, BOB,
TEST_GRANDPA_SET_ID,
};
use codec::Encode;
use frame_support::{
assert_err, assert_noop, assert_ok,
dispatch::{Pays, PostDispatchInfo},
storage::generator::StorageValue,
};
use frame_system::{EventRecord, Phase};
use sp_consensus_grandpa::{ConsensusLog, GRANDPA_ENGINE_ID};
use sp_core::Get;
use sp_runtime::{Digest, DigestItem, DispatchError};
fn initialize_substrate_bridge() {
System::set_block_number(1);
System::reset_events();
assert_ok!(init_with_origin(RuntimeOrigin::root()));
}
fn init_with_origin(
origin: RuntimeOrigin,
) -> Result<
InitializationData<TestHeader>,
sp_runtime::DispatchErrorWithPostInfo<PostDispatchInfo>,
> {
let genesis = test_header(0);
let init_data = InitializationData {
header: Box::new(genesis),
authority_list: authority_list(),
set_id: TEST_GRANDPA_SET_ID,
operating_mode: BasicOperatingMode::Normal,
};
Pallet::<TestRuntime>::initialize(origin, init_data.clone()).map(|_| init_data)
}
fn submit_finality_proof(header: u8) -> frame_support::dispatch::DispatchResultWithPostInfo {
let header = test_header(header.into());
let justification = make_default_justification(&header);
Pallet::<TestRuntime>::submit_finality_proof_ex(
RuntimeOrigin::signed(1),
Box::new(header),
justification,
TEST_GRANDPA_SET_ID,
false,
)
}
fn submit_finality_proof_with_set_id(
header: u8,
set_id: u64,
) -> frame_support::dispatch::DispatchResultWithPostInfo {
let header = test_header(header.into());
let justification = make_justification_for_header(JustificationGeneratorParams {
header: header.clone(),
set_id,
..Default::default()
});
Pallet::<TestRuntime>::submit_finality_proof_ex(
RuntimeOrigin::signed(1),
Box::new(header),
justification,
set_id,
false,
)
}
fn submit_mandatory_finality_proof(
number: u8,
set_id: u64,
) -> frame_support::dispatch::DispatchResultWithPostInfo {
let mut header = test_header(number.into());
// to ease tests that are using `submit_mandatory_finality_proof`, we'll be using the
// same set for all sessions
let consensus_log =
ConsensusLog::<TestNumber>::ScheduledChange(sp_consensus_grandpa::ScheduledChange {
next_authorities: authority_list(),
delay: 0,
});
header.digest =
Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] };
let justification = make_justification_for_header(JustificationGeneratorParams {
header: header.clone(),
set_id,
..Default::default()
});
Pallet::<TestRuntime>::submit_finality_proof_ex(
RuntimeOrigin::signed(1),
Box::new(header),
justification,
set_id,
false,
)
}
fn next_block() {
use frame_support::traits::OnInitialize;
let current_number = frame_system::Pallet::<TestRuntime>::block_number();
frame_system::Pallet::<TestRuntime>::set_block_number(current_number + 1);
let _ = Pallet::<TestRuntime>::on_initialize(current_number);
}
fn change_log(delay: u64) -> Digest {
let consensus_log =
ConsensusLog::<TestNumber>::ScheduledChange(sp_consensus_grandpa::ScheduledChange {
next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)],
delay,
});
Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] }
}
fn forced_change_log(delay: u64) -> Digest {
let consensus_log = ConsensusLog::<TestNumber>::ForcedChange(
delay,
sp_consensus_grandpa::ScheduledChange {
next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)],
delay,
},
);
Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] }
}
fn many_authorities_log() -> Digest {
let consensus_log =
ConsensusLog::<TestNumber>::ScheduledChange(sp_consensus_grandpa::ScheduledChange {
next_authorities: std::iter::repeat((ALICE.into(), 1))
.take(MAX_BRIDGED_AUTHORITIES as usize + 1)
.collect(),
delay: 0,
});
Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] }
}
#[test]
fn init_root_or_owner_origin_can_initialize_pallet() {
run_test(|| {
assert_noop!(init_with_origin(RuntimeOrigin::signed(1)), DispatchError::BadOrigin);
assert_ok!(init_with_origin(RuntimeOrigin::root()));
// Reset storage so we can initialize the pallet again
BestFinalized::<TestRuntime>::kill();
PalletOwner::<TestRuntime>::put(2);
assert_ok!(init_with_origin(RuntimeOrigin::signed(2)));
})
}
#[test]
fn init_storage_entries_are_correctly_initialized() {
run_test(|| {
assert_eq!(BestFinalized::<TestRuntime>::get(), None,);
assert_eq!(Pallet::<TestRuntime>::best_finalized(), None);
assert_eq!(PalletOperatingMode::<TestRuntime>::try_get(), Err(()));
let init_data = init_with_origin(RuntimeOrigin::root()).unwrap();
assert!(<ImportedHeaders<TestRuntime>>::contains_key(init_data.header.hash()));
assert_eq!(BestFinalized::<TestRuntime>::get().unwrap().1, init_data.header.hash());
assert_eq!(
CurrentAuthoritySet::<TestRuntime>::get().authorities,
init_data.authority_list
);
assert_eq!(
PalletOperatingMode::<TestRuntime>::try_get(),
Ok(BasicOperatingMode::Normal)
);
})
}
#[test]
fn init_can_only_initialize_pallet_once() {
run_test(|| {
initialize_substrate_bridge();
assert_noop!(
init_with_origin(RuntimeOrigin::root()),
<Error<TestRuntime>>::AlreadyInitialized
);
})
}
#[test]
fn init_fails_if_there_are_too_many_authorities_in_the_set() {
run_test(|| {
let genesis = test_header(0);
let init_data = InitializationData {
header: Box::new(genesis),
authority_list: std::iter::repeat(authority_list().remove(0))
.take(MAX_BRIDGED_AUTHORITIES as usize + 1)
.collect(),
set_id: 1,
operating_mode: BasicOperatingMode::Normal,
};
assert_noop!(
Pallet::<TestRuntime>::initialize(RuntimeOrigin::root(), init_data),
Error::<TestRuntime>::TooManyAuthoritiesInSet,
);
});
}
#[test]
fn pallet_rejects_transactions_if_halted() {
run_test(|| {
initialize_substrate_bridge();
assert_ok!(Pallet::<TestRuntime>::set_operating_mode(
RuntimeOrigin::root(),
BasicOperatingMode::Halted
));
assert_noop!(
submit_finality_proof(1),
Error::<TestRuntime>::BridgeModule(bp_runtime::OwnedBridgeModuleError::Halted)
);
assert_ok!(Pallet::<TestRuntime>::set_operating_mode(
RuntimeOrigin::root(),
BasicOperatingMode::Normal
));
assert_ok!(submit_finality_proof(1));
})
}
#[test]
fn pallet_rejects_header_if_not_initialized_yet() {
run_test(|| {
assert_noop!(submit_finality_proof(1), Error::<TestRuntime>::NotInitialized);
});
}
#[test]
fn succesfully_imports_header_with_valid_finality() {
run_test(|| {
initialize_substrate_bridge();
let header_number = 1;
let header = test_header(header_number.into());
let justification = make_default_justification(&header);
let pre_dispatch_weight = <TestRuntime as Config>::WeightInfo::submit_finality_proof(
justification.commit.precommits.len().try_into().unwrap_or(u32::MAX),
justification.votes_ancestries.len().try_into().unwrap_or(u32::MAX),
);
let result = submit_finality_proof(header_number);
assert_ok!(result);
assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes);
// our test config assumes 2048 max authorities and we are just using couple
let pre_dispatch_proof_size = pre_dispatch_weight.proof_size();
let actual_proof_size = result.unwrap().actual_weight.unwrap().proof_size();
assert!(actual_proof_size > 0);
assert!(
actual_proof_size < pre_dispatch_proof_size,
"Actual proof size {actual_proof_size} must be less than the pre-dispatch {pre_dispatch_proof_size}",
);
let header = test_header(1);
assert_eq!(<BestFinalized<TestRuntime>>::get().unwrap().1, header.hash());
assert!(<ImportedHeaders<TestRuntime>>::contains_key(header.hash()));
assert_eq!(
System::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: TestEvent::Grandpa(Event::UpdatedBestFinalizedHeader {
number: *header.number(),
hash: header.hash(),
grandpa_info: StoredHeaderGrandpaInfo {
finality_proof: justification.clone(),
new_verification_context: None,
},
}),
topics: vec![],
}],
);
assert_eq!(
Pallet::<TestRuntime>::synced_headers_grandpa_info(),
vec![StoredHeaderGrandpaInfo {
finality_proof: justification,
new_verification_context: None
}]
);
})
}
#[test]
fn rejects_justification_that_skips_authority_set_transition() {
run_test(|| {
initialize_substrate_bridge();
let header = test_header(1);
let next_set_id = 2;
let params = JustificationGeneratorParams::<TestHeader> {
set_id: next_set_id,
..Default::default()
};
let justification = make_justification_for_header(params);
assert_err!(
Pallet::<TestRuntime>::submit_finality_proof_ex(
RuntimeOrigin::signed(1),
Box::new(header.clone()),
justification.clone(),
TEST_GRANDPA_SET_ID,
false,
),
<Error<TestRuntime>>::InvalidJustification
);
assert_err!(
Pallet::<TestRuntime>::submit_finality_proof_ex(
RuntimeOrigin::signed(1),
Box::new(header),
justification,
next_set_id,
false,
),
<Error<TestRuntime>>::InvalidAuthoritySetId
);
})
}
#[test]
fn does_not_import_header_with_invalid_finality_proof() {
run_test(|| {
initialize_substrate_bridge();
let header = test_header(1);
let mut justification = make_default_justification(&header);
justification.round = 42;
assert_err!(
Pallet::<TestRuntime>::submit_finality_proof_ex(
RuntimeOrigin::signed(1),
Box::new(header),
justification,
TEST_GRANDPA_SET_ID,
false,
),
<Error<TestRuntime>>::InvalidJustification
);
})
}
#[test]
fn disallows_invalid_authority_set() {
run_test(|| {
let genesis = test_header(0);
let invalid_authority_list = vec![(ALICE.into(), u64::MAX), (BOB.into(), u64::MAX)];
let init_data = InitializationData {
header: Box::new(genesis),
authority_list: invalid_authority_list,
set_id: 1,
operating_mode: BasicOperatingMode::Normal,
};
assert_ok!(Pallet::<TestRuntime>::initialize(RuntimeOrigin::root(), init_data));
let header = test_header(1);
let justification = make_default_justification(&header);
assert_err!(
Pallet::<TestRuntime>::submit_finality_proof_ex(
RuntimeOrigin::signed(1),
Box::new(header),
justification,
TEST_GRANDPA_SET_ID,
false,
),
<Error<TestRuntime>>::InvalidAuthoritySet
);
})
}
#[test]
fn importing_header_ensures_that_chain_is_extended() {
run_test(|| {
initialize_substrate_bridge();
assert_ok!(submit_finality_proof(4));
assert_err!(submit_finality_proof(3), Error::<TestRuntime>::OldHeader);
assert_ok!(submit_finality_proof(5));
})
}
// Importing a header that signals a (zero-delay) authority set change must:
// import for free (`Pays::No`), update best finalized, enact the new set,
// emit the `UpdatedBestFinalizedHeader` event and expose the new GRANDPA info.
#[test]
fn importing_header_enacts_new_authority_set() {
run_test(|| {
initialize_substrate_bridge();
let next_set_id = 2;
let next_authorities = vec![(ALICE.into(), 1), (BOB.into(), 1)];
// Need to update the header digest to indicate that our header signals an authority set
// change. The change will be enacted when we import our header.
let mut header = test_header(2);
header.digest = change_log(0);
// Create a valid justification for the header
let justification = make_default_justification(&header);
// Let's import our test header
let result = Pallet::<TestRuntime>::submit_finality_proof_ex(
RuntimeOrigin::signed(1),
Box::new(header.clone()),
justification.clone(),
TEST_GRANDPA_SET_ID,
false,
);
assert_ok!(result);
// mandatory headers are imported for free
assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::No);
// Make sure that our header is the best finalized
assert_eq!(<BestFinalized<TestRuntime>>::get().unwrap().1, header.hash());
assert!(<ImportedHeaders<TestRuntime>>::contains_key(header.hash()));
// Make sure that the authority set actually changed upon importing our header
assert_eq!(
<CurrentAuthoritySet<TestRuntime>>::get(),
StoredAuthoritySet::<TestRuntime, ()>::try_new(next_authorities, next_set_id)
.unwrap(),
);
// Make sure the expected `UpdatedBestFinalizedHeader` event has been deposited
assert_eq!(
System::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: TestEvent::Grandpa(Event::UpdatedBestFinalizedHeader {
number: *header.number(),
hash: header.hash(),
grandpa_info: StoredHeaderGrandpaInfo {
finality_proof: justification.clone(),
new_verification_context: Some(
<CurrentAuthoritySet<TestRuntime>>::get().into()
),
},
}),
topics: vec![],
}],
);
// the same info must also be available through the dedicated getter
assert_eq!(
Pallet::<TestRuntime>::synced_headers_grandpa_info(),
vec![StoredHeaderGrandpaInfo {
finality_proof: justification,
new_verification_context: Some(
<CurrentAuthoritySet<TestRuntime>>::get().into()
),
}]
);
})
}
// Mandatory headers are normally free, but one that exceeds the configured size
// limit (`MAX_MANDATORY_HEADER_SIZE`) must still import — with `Pays::Yes`.
#[test]
fn relayer_pays_tx_fee_when_submitting_huge_mandatory_header() {
run_test(|| {
initialize_substrate_bridge();
// let's prepare a huge authorities change header, which is definitely above size limits
let mut header = test_header(2);
header.digest = change_log(0);
header.digest.push(DigestItem::Other(vec![42u8; 1024 * 1024]));
let justification = make_default_justification(&header);
// without large digest item ^^^ the relayer would have paid zero transaction fee
// (`Pays::No`)
let result = Pallet::<TestRuntime>::submit_finality_proof_ex(
RuntimeOrigin::signed(1),
Box::new(header.clone()),
justification,
TEST_GRANDPA_SET_ID,
false,
);
assert_ok!(result);
assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes);
// Make sure that our header is the best finalized
assert_eq!(<BestFinalized<TestRuntime>>::get().unwrap().1, header.hash());
assert!(<ImportedHeaders<TestRuntime>>::contains_key(header.hash()));
})
}
// A mandatory header whose justification carries more ancestry votes than
// `REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY` still imports, but the relayer
// must pay the fee (`Pays::Yes`).
#[test]
fn relayer_pays_tx_fee_when_submitting_justification_with_long_ancestry_votes() {
run_test(|| {
initialize_substrate_bridge();
// let's prepare a huge authorities change header, which is definitely above weight
// limits
let mut header = test_header(2);
header.digest = change_log(0);
let justification = make_justification_for_header(JustificationGeneratorParams {
header: header.clone(),
ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY + 1,
..Default::default()
});
// without many headers in votes ancestries ^^^ the relayer would have paid zero
// transaction fee (`Pays::No`)
let result = Pallet::<TestRuntime>::submit_finality_proof_ex(
RuntimeOrigin::signed(1),
Box::new(header.clone()),
justification,
TEST_GRANDPA_SET_ID,
false,
);
assert_ok!(result);
assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes);
// Make sure that our header is the best finalized
assert_eq!(<BestFinalized<TestRuntime>>::get().unwrap().1, header.hash());
assert!(<ImportedHeaders<TestRuntime>>::contains_key(header.hash()));
})
}
// The light client only supports authority set changes that are enacted
// immediately; a scheduled change with a non-zero delay must be rejected.
#[test]
fn importing_header_rejects_header_with_scheduled_change_delay() {
run_test(|| {
initialize_substrate_bridge();
// Need to update the header digest to indicate that our header signals an authority set
// change. However, the change doesn't happen until the next block.
let mut header = test_header(2);
header.digest = change_log(1);
// Create a valid justification for the header
let justification = make_default_justification(&header);
// Should not be allowed to import this header
assert_err!(
Pallet::<TestRuntime>::submit_finality_proof_ex(
RuntimeOrigin::signed(1),
Box::new(header),
justification,
TEST_GRANDPA_SET_ID,
false,
),
<Error<TestRuntime>>::UnsupportedScheduledChange
);
})
}
// Forced authority set changes are not supported by the light client and must
// be rejected with `UnsupportedScheduledChange`.
#[test]
fn importing_header_rejects_header_with_forced_changes() {
run_test(|| {
initialize_substrate_bridge();
// Need to update the header digest to indicate that it signals a forced authority set
// change.
let mut header = test_header(2);
header.digest = forced_change_log(0);
// Create a valid justification for the header
let justification = make_default_justification(&header);
// Should not be allowed to import this header
assert_err!(
Pallet::<TestRuntime>::submit_finality_proof_ex(
RuntimeOrigin::signed(1),
Box::new(header),
justification,
TEST_GRANDPA_SET_ID,
false,
),
<Error<TestRuntime>>::UnsupportedScheduledChange
);
})
}
// A header scheduling a new authority set that is larger than the configured
// `MAX_AUTHORITIES_COUNT` must be rejected with `TooManyAuthoritiesInSet`.
#[test]
fn importing_header_rejects_header_with_too_many_authorities() {
run_test(|| {
initialize_substrate_bridge();
// Need to update the header digest to indicate that our header signals an authority set
// change to a set with more authorities than the pallet is configured to accept.
let mut header = test_header(2);
header.digest = many_authorities_log();
// Create a valid justification for the header
let justification = make_default_justification(&header);
// Should not be allowed to import this header
assert_err!(
Pallet::<TestRuntime>::submit_finality_proof_ex(
RuntimeOrigin::signed(1),
Box::new(header),
justification,
TEST_GRANDPA_SET_ID,
false,
),
<Error<TestRuntime>>::TooManyAuthoritiesInSet
);
});
}
// Verifying a storage proof against a header hash that was never imported must
// fail with `HeaderChainError::UnknownHeader` and must not modify storage.
#[test]
fn parse_finalized_storage_proof_rejects_proof_on_unknown_header() {
run_test(|| {
assert_noop!(
Pallet::<TestRuntime>::storage_proof_checker(Default::default(), vec![],)
.map(|_| ()),
bp_header_chain::HeaderChainError::UnknownHeader,
);
});
}
// A storage proof that matches the state root of an imported (and best
// finalized) header must be accepted by `storage_proof_checker`.
#[test]
fn parse_finalized_storage_accepts_valid_proof() {
run_test(|| {
let (state_root, storage_proof) = bp_runtime::craft_valid_storage_proof();
let mut header = test_header(2);
header.set_state_root(state_root);
let hash = header.hash();
// store the header directly, bypassing the regular import path
<BestFinalized<TestRuntime>>::put(HeaderId(2, hash));
<ImportedHeaders<TestRuntime>>::insert(hash, header.build());
assert_ok!(
Pallet::<TestRuntime>::storage_proof_checker(hash, storage_proof).map(|_| ())
);
});
}
// Only `MaxFreeHeadersPerBlock` (2 in the mock) mandatory headers may be
// imported for free within one block; the next one is charged.
#[test]
fn rate_limiter_disallows_free_imports_once_limit_is_hit_in_single_block() {
run_test(|| {
initialize_substrate_bridge();
let result = submit_mandatory_finality_proof(1, 1);
assert_eq!(result.expect("call failed").pays_fee, Pays::No);
let result = submit_mandatory_finality_proof(2, 2);
assert_eq!(result.expect("call failed").pays_fee, Pays::No);
// third free import within the same block exceeds the limit
let result = submit_mandatory_finality_proof(3, 3);
assert_eq!(result.expect("call failed").pays_fee, Pays::Yes);
})
}
// Failed submissions must not consume the free-headers allowance: after many
// rejected calls, the usual number of free mandatory imports is still allowed.
#[test]
fn rate_limiter_invalid_requests_do_not_count_towards_request_count() {
run_test(|| {
let submit_invalid_request = || {
let mut header = test_header(1);
header.digest = change_log(0);
let mut invalid_justification = make_default_justification(&header);
// corrupt the justification so the call is rejected
invalid_justification.round = 42;
Pallet::<TestRuntime>::submit_finality_proof_ex(
RuntimeOrigin::signed(1),
Box::new(header),
invalid_justification,
TEST_GRANDPA_SET_ID,
false,
)
};
initialize_substrate_bridge();
// more invalid requests than the per-block free allowance
for _ in 0..<TestRuntime as Config>::MaxFreeHeadersPerBlock::get() + 1 {
assert_err!(submit_invalid_request(), <Error<TestRuntime>>::InvalidJustification);
}
// Can still submit free mandatory headers afterwards
let result = submit_mandatory_finality_proof(1, 1);
assert_eq!(result.expect("call failed").pays_fee, Pays::No);
let result = submit_mandatory_finality_proof(2, 2);
assert_eq!(result.expect("call failed").pays_fee, Pays::No);
let result = submit_mandatory_finality_proof(3, 3);
assert_eq!(result.expect("call failed").pays_fee, Pays::Yes);
})
}
// The free-headers allowance is a per-block counter: it is refilled as soon as
// a new block begins.
#[test]
fn rate_limiter_allows_request_after_new_block_has_started() {
run_test(|| {
initialize_substrate_bridge();
let result = submit_mandatory_finality_proof(1, 1);
assert_eq!(result.expect("call failed").pays_fee, Pays::No);
let result = submit_mandatory_finality_proof(2, 2);
assert_eq!(result.expect("call failed").pays_fee, Pays::No);
// allowance exhausted within this block
let result = submit_mandatory_finality_proof(3, 3);
assert_eq!(result.expect("call failed").pays_fee, Pays::Yes);
// new block => allowance is reset
next_block();
let result = submit_mandatory_finality_proof(4, 4);
assert_eq!(result.expect("call failed").pays_fee, Pays::No);
let result = submit_mandatory_finality_proof(5, 5);
assert_eq!(result.expect("call failed").pays_fee, Pays::No);
let result = submit_mandatory_finality_proof(6, 6);
assert_eq!(result.expect("call failed").pays_fee, Pays::Yes);
})
}
// Paid (non-mandatory, non-free) header imports must not consume the free
// allowance — only free imports count towards the per-block limit.
#[test]
fn rate_limiter_ignores_non_mandatory_headers() {
run_test(|| {
initialize_substrate_bridge();
// paid import — does not touch the counter
let result = submit_finality_proof(1);
assert_eq!(result.expect("call failed").pays_fee, Pays::Yes);
let result = submit_mandatory_finality_proof(2, 1);
assert_eq!(result.expect("call failed").pays_fee, Pays::No);
let result = submit_finality_proof_with_set_id(3, 2);
assert_eq!(result.expect("call failed").pays_fee, Pays::Yes);
let result = submit_mandatory_finality_proof(4, 2);
assert_eq!(result.expect("call failed").pays_fee, Pays::No);
let result = submit_finality_proof_with_set_id(5, 3);
assert_eq!(result.expect("call failed").pays_fee, Pays::Yes);
// third mandatory header in this block — free allowance is exhausted
let result = submit_mandatory_finality_proof(6, 3);
assert_eq!(result.expect("call failed").pays_fee, Pays::Yes);
})
}
// Non-mandatory headers may also be imported for free, provided they improve
// best finalized by at least `FreeHeadersInterval` and the per-block free
// allowance is not exhausted.
#[test]
fn may_import_non_mandatory_header_for_free() {
run_test(|| {
initialize_substrate_bridge();
// helper that (re)sets best finalized to `BEST` (12)
const BEST: u8 = 12;
fn reset_best() {
BestFinalized::<TestRuntime, ()>::set(Some(HeaderId(
BEST as _,
Default::default(),
)));
}
// non-mandatory header is imported with fee
reset_best();
let non_free_header_number = BEST + FreeHeadersInterval::get() as u8 - 1;
let result = submit_finality_proof(non_free_header_number);
assert_eq!(result.unwrap().pays_fee, Pays::Yes);
// non-mandatory free header is imported without fee
reset_best();
let free_header_number = BEST + FreeHeadersInterval::get() as u8;
let result = submit_finality_proof(free_header_number);
assert_eq!(result.unwrap().pays_fee, Pays::No);
// another non-mandatory free header is imported without fee
let free_header_number = BEST + FreeHeadersInterval::get() as u8 * 2;
let result = submit_finality_proof(free_header_number);
assert_eq!(result.unwrap().pays_fee, Pays::No);
// now the rate limiter starts charging fees even for free headers
let free_header_number = BEST + FreeHeadersInterval::get() as u8 * 3;
let result = submit_finality_proof(free_header_number);
assert_eq!(result.unwrap().pays_fee, Pays::Yes);
// check that we can import for free if `improved_by` is larger
// than the free interval
next_block();
reset_best();
let free_header_number = FreeHeadersInterval::get() as u8 + 42;
let result = submit_finality_proof(free_header_number);
assert_eq!(result.unwrap().pays_fee, Pays::No);
// check that the rate limiter shares the counter between mandatory
// and free non-mandatory headers
next_block();
reset_best();
let free_header_number = BEST + FreeHeadersInterval::get() as u8 * 4;
let result = submit_finality_proof(free_header_number);
assert_eq!(result.unwrap().pays_fee, Pays::No);
let result = submit_mandatory_finality_proof(free_header_number + 1, 1);
assert_eq!(result.expect("call failed").pays_fee, Pays::No);
let result = submit_mandatory_finality_proof(free_header_number + 2, 2);
assert_eq!(result.expect("call failed").pays_fee, Pays::Yes);
});
}
// After more than `HeadersToKeep` headers have been imported, the oldest one
// must have been pruned from the `ImportedHeaders` map.
#[test]
fn should_prune_headers_over_headers_to_keep_parameter() {
    run_test(|| {
        initialize_substrate_bridge();

        // Import the first header and remember its hash, so we can check for it later.
        assert_ok!(submit_finality_proof(1));
        let first_header_hash = Pallet::<TestRuntime>::best_finalized().unwrap().hash();

        // Import five more headers (one per block), pushing the first header
        // beyond the `HeadersToKeep` pruning window.
        for number in 2..=6 {
            next_block();
            assert_ok!(submit_finality_proof(number));
        }

        assert!(
            !ImportedHeaders::<TestRuntime, ()>::contains_key(first_header_hash),
            "First header should be pruned.",
        );
    })
}
// The storage keys exposed by `bp_header_chain::storage_keys` must match the
// final keys that the pallet macro actually derives for this pallet instance.
#[test]
fn storage_keys_computed_properly() {
assert_eq!(
PalletOperatingMode::<TestRuntime>::storage_value_final_key().to_vec(),
bp_header_chain::storage_keys::pallet_operating_mode_key("Grandpa").0,
);
assert_eq!(
CurrentAuthoritySet::<TestRuntime>::storage_value_final_key().to_vec(),
bp_header_chain::storage_keys::current_authority_set_key("Grandpa").0,
);
assert_eq!(
BestFinalized::<TestRuntime>::storage_value_final_key().to_vec(),
bp_header_chain::storage_keys::best_finalized_key("Grandpa").0,
);
}
// The "indirect" `BridgeGrandpaCall` (used by other chains to construct calls
// to this pallet) must encode identically to the pallet's own `Call` variants.
#[test]
fn test_bridge_grandpa_call_is_correctly_defined() {
let header = test_header(0);
let init_data = InitializationData {
header: Box::new(header.clone()),
authority_list: authority_list(),
set_id: 1,
operating_mode: BasicOperatingMode::Normal,
};
let justification = make_default_justification(&header);
// `initialize` must have the same SCALE encoding in both representations
let direct_initialize_call =
Call::<TestRuntime>::initialize { init_data: init_data.clone() };
let indirect_initialize_call = BridgeGrandpaCall::<TestHeader>::initialize { init_data };
assert_eq!(direct_initialize_call.encode(), indirect_initialize_call.encode());
// ...and so must `submit_finality_proof`
let direct_submit_finality_proof_call = Call::<TestRuntime>::submit_finality_proof {
finality_target: Box::new(header.clone()),
justification: justification.clone(),
};
let indirect_submit_finality_proof_call =
BridgeGrandpaCall::<TestHeader>::submit_finality_proof {
finality_target: Box::new(header),
justification,
};
assert_eq!(
direct_submit_finality_proof_call.encode(),
indirect_submit_finality_proof_call.encode()
);
}
// Generates the shared owner/operating-mode management tests for this bridge module.
generate_owned_bridge_module_tests!(BasicOperatingMode::Normal, BasicOperatingMode::Halted);
// `MaybeHeadersToKeep` must reflect the `HeadersToKeep` value configured in the mock.
#[test]
fn maybe_headers_to_keep_returns_correct_value() {
assert_eq!(MaybeHeadersToKeep::<TestRuntime, ()>::get(), Some(mock::HeadersToKeep::get()));
}
// `submit_finality_proof_ex` must only be callable from a signed origin;
// a root origin is rejected with `BadOrigin` without any storage change.
#[test]
fn submit_finality_proof_requires_signed_origin() {
run_test(|| {
initialize_substrate_bridge();
let header = test_header(1);
let justification = make_default_justification(&header);
assert_noop!(
Pallet::<TestRuntime>::submit_finality_proof_ex(
RuntimeOrigin::root(),
Box::new(header),
justification,
TEST_GRANDPA_SET_ID,
false,
),
DispatchError::BadOrigin,
);
})
}
// The free-headers counter must saturate at zero — repeated decrements keep it
// at `Some(0)` and never turn it into `None`.
#[test]
fn on_free_header_imported_never_sets_to_none() {
run_test(|| {
FreeHeadersRemaining::<TestRuntime, ()>::set(Some(2));
on_free_header_imported::<TestRuntime, ()>();
assert_eq!(FreeHeadersRemaining::<TestRuntime, ()>::get(), Some(1));
on_free_header_imported::<TestRuntime, ()>();
assert_eq!(FreeHeadersRemaining::<TestRuntime, ()>::get(), Some(0));
// saturates at zero instead of wrapping or clearing the value
on_free_header_imported::<TestRuntime, ()>();
assert_eq!(FreeHeadersRemaining::<TestRuntime, ()>::get(), Some(0));
})
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
// From construct_runtime macro
#![allow(clippy::from_over_into)]
use bp_header_chain::ChainWithGrandpa;
use bp_runtime::{Chain, ChainId};
use frame_support::{
construct_runtime, derive_impl, parameter_types, traits::Hooks, weights::Weight,
};
use sp_core::sr25519::Signature;
/// Account id type used by the test runtime.
pub type AccountId = u64;
/// Header type of the bridged test chain.
pub type TestHeader = sp_runtime::testing::Header;
/// Block number type of the bridged test chain.
pub type TestNumber = u64;
// Block type of the test runtime itself (not of the bridged chain).
type Block = frame_system::mocking::MockBlock<TestRuntime>;
/// Maximal number of bridged GRANDPA authorities accepted by the test pallet.
pub const MAX_BRIDGED_AUTHORITIES: u32 = 5;
use crate as grandpa;
// Test runtime: just the system pallet plus the bridge GRANDPA pallet under test.
construct_runtime! {
pub enum TestRuntime
{
System: frame_system::{Pallet, Call, Config<T>, Storage, Event<T>},
Grandpa: grandpa::{Pallet, Call, Event<T>},
}
}
// Use the frame_system test defaults; only the block type needs to be specified.
#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for TestRuntime {
type Block = Block;
}
parameter_types! {
// at most two headers may be imported for free within a single block
pub const MaxFreeHeadersPerBlock: u32 = 2;
// a non-mandatory header is free only if it improves best finalized by >= 32
pub const FreeHeadersInterval: u32 = 32;
// number of recent headers retained before pruning
pub const HeadersToKeep: u32 = 5;
}
// Configuration of the bridge GRANDPA pallet under test.
impl grandpa::Config for TestRuntime {
type RuntimeEvent = RuntimeEvent;
type BridgedChain = TestBridgedChain;
type MaxFreeHeadersPerBlock = MaxFreeHeadersPerBlock;
type FreeHeadersInterval = FreeHeadersInterval;
type HeadersToKeep = HeadersToKeep;
type WeightInfo = ();
}
/// Chain that is being bridged in these tests.
#[derive(Debug)]
pub struct TestBridgedChain;
impl Chain for TestBridgedChain {
const ID: ChainId = *b"tbch";
// the bridged chain reuses the test runtime's basic types
type BlockNumber = frame_system::pallet_prelude::BlockNumberFor<TestRuntime>;
type Hash = <TestRuntime as frame_system::Config>::Hash;
type Hasher = <TestRuntime as frame_system::Config>::Hashing;
type Header = TestHeader;
type AccountId = AccountId;
type Balance = u64;
type Nonce = u64;
type Signature = Signature;
// the tests never query these chain limits, so they deliberately panic
fn max_extrinsic_size() -> u32 {
unreachable!()
}
fn max_extrinsic_weight() -> Weight {
unreachable!()
}
}
impl ChainWithGrandpa for TestBridgedChain {
const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = "";
const MAX_AUTHORITIES_COUNT: u32 = MAX_BRIDGED_AUTHORITIES;
// deliberately small limits so tests can exceed them cheaply
const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8;
const MAX_MANDATORY_HEADER_SIZE: u32 = 256;
const AVERAGE_HEADER_SIZE: u32 = 64;
}
/// Return test externalities to use in tests.
pub fn new_test_ext() -> sp_io::TestExternalities {
sp_io::TestExternalities::new(Default::default())
}
/// Run test within default test externalities context.
///
/// Calls `Grandpa::on_initialize(0)` first, so per-block state (e.g. the
/// free-headers counter) is initialized before the test body runs.
pub fn run_test<T>(test: impl FnOnce() -> T) -> T {
new_test_ext().execute_with(|| {
let _ = Grandpa::on_initialize(0);
test()
})
}
/// Return test header with given number.
pub fn test_header(num: TestNumber) -> TestHeader {
// We wrap the call to avoid explicit type annotations in our tests
bp_test_utils::test_header(num)
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Wrappers for public types that are implementing `MaxEncodedLen`
use crate::{Config, Error};
use bp_header_chain::{AuthoritySet, ChainWithGrandpa};
use codec::{Decode, Encode, MaxEncodedLen};
use frame_support::{traits::Get, BoundedVec, CloneNoBound, RuntimeDebugNoBound};
use scale_info::TypeInfo;
use sp_consensus_grandpa::{AuthorityId, AuthorityList, AuthorityWeight, SetId};
use sp_std::marker::PhantomData;
/// A bounded list of Grandpa authorities with associated weights.
///
/// The bound is supplied via the `MaxBridgedAuthorities` type parameter
/// (see [`StoredAuthorityListLimit`]).
pub type StoredAuthorityList<MaxBridgedAuthorities> =
BoundedVec<(AuthorityId, AuthorityWeight), MaxBridgedAuthorities>;
/// Adapter for using `T::BridgedChain::MAX_AUTHORITIES_COUNT` in `BoundedVec`.
pub struct StoredAuthorityListLimit<T, I>(PhantomData<(T, I)>);
impl<T: Config<I>, I: 'static> Get<u32> for StoredAuthorityListLimit<T, I> {
fn get() -> u32 {
// forwards the limit declared by the bridged chain definition
T::BridgedChain::MAX_AUTHORITIES_COUNT
}
}
/// A bounded GRANDPA Authority List and ID.
///
/// Stored in pallet storage, hence the `MaxEncodedLen` bound.
#[derive(CloneNoBound, Decode, Encode, Eq, TypeInfo, MaxEncodedLen, RuntimeDebugNoBound)]
#[scale_info(skip_type_params(T, I))]
pub struct StoredAuthoritySet<T: Config<I>, I: 'static> {
/// List of GRANDPA authorities for the current round.
pub authorities: StoredAuthorityList<StoredAuthorityListLimit<T, I>>,
/// Monotonic identifier of the current GRANDPA authority set.
pub set_id: SetId,
}
impl<T: Config<I>, I: 'static> StoredAuthoritySet<T, I> {
/// Try to create a new bounded GRANDPA Authority Set from unbounded list.
///
/// Returns error if number of authorities in the provided list is too large.
pub fn try_new(authorities: AuthorityList, set_id: SetId) -> Result<Self, Error<T, I>> {
Ok(Self {
authorities: TryFrom::try_from(authorities)
.map_err(|_| Error::TooManyAuthoritiesInSet)?,
set_id,
})
}
/// Returns number of bytes that may be subtracted from the PoV component of
/// `submit_finality_proof` call, because the actual authorities set is smaller than the maximal
/// configured.
///
/// Maximal authorities set size is configured by the `MaxBridgedAuthorities` constant from
/// the pallet configuration. The PoV of the call includes the size of maximal authorities
/// count. If the actual size is smaller, we may subtract extra bytes from this component.
pub fn unused_proof_size(&self) -> u64 {
// we can only safely estimate bytes that are occupied by the authority data itself. We have
// no means here to compute PoV bytes, occupied by extra trie nodes or extra bytes in the
// whole set encoding
let single_authority_max_encoded_len =
<(AuthorityId, AuthorityWeight)>::max_encoded_len() as u64;
let extra_authorities =
T::BridgedChain::MAX_AUTHORITIES_COUNT.saturating_sub(self.authorities.len() as _);
single_authority_max_encoded_len.saturating_mul(extra_authorities as u64)
}
}
impl<T: Config<I>, I: 'static> PartialEq for StoredAuthoritySet<T, I> {
    // Manual impl: deriving `PartialEq` would require bounds on `T`/`I`.
    fn eq(&self, other: &Self) -> bool {
        self.authorities == other.authorities && self.set_id == other.set_id
    }
}
impl<T: Config<I>, I: 'static> Default for StoredAuthoritySet<T, I> {
    // Empty authority list with the initial (zero) set id.
    fn default() -> Self {
        Self { authorities: BoundedVec::default(), set_id: 0 }
    }
}
impl<T: Config<I>, I: 'static> From<StoredAuthoritySet<T, I>> for AuthoritySet {
fn from(t: StoredAuthoritySet<T, I>) -> Self {
AuthoritySet { authorities: t.authorities.into(), set_id: t.set_id }
}
}
#[cfg(test)]
mod tests {
use crate::mock::{TestRuntime, MAX_BRIDGED_AUTHORITIES};
use bp_test_utils::authority_list;
type StoredAuthoritySet = super::StoredAuthoritySet<TestRuntime, ()>;
#[test]
fn unused_proof_size_works() {
let authority_entry = authority_list().pop().unwrap();
// when we have exactly `MaxBridgedAuthorities` authorities
assert_eq!(
StoredAuthoritySet::try_new(
vec![authority_entry.clone(); MAX_BRIDGED_AUTHORITIES as usize],
0,
)
.unwrap()
.unused_proof_size(),
0,
);
// when we have less than `MaxBridgedAuthorities` authorities
assert_eq!(
StoredAuthoritySet::try_new(
vec![authority_entry; MAX_BRIDGED_AUTHORITIES as usize - 1],
0,
)
.unwrap()
.unused_proof_size(),
// 40 = max encoded length of one `(AuthorityId, AuthorityWeight)` entry
// (presumably a 32-byte id + 8-byte weight — TODO(review): confirm)
40,
);
// and we can't have more than `MaxBridgedAuthorities` authorities in the bounded vec, so
// no test for this case
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Autogenerated weights for pallet_bridge_grandpa
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
//! DATE: 2023-03-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `covid`, CPU: `11th Gen Intel(R) Core(TM) i7-11800H @ 2.30GHz`
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
// Executed Command:
// target/release/unknown-bridge-node
// benchmark
// pallet
// --chain=dev
// --steps=50
// --repeat=20
// --pallet=pallet_bridge_grandpa
// --extrinsic=*
// --execution=wasm
// --wasm-execution=Compiled
// --heap-pages=4096
// --output=./modules/grandpa/src/weights.rs
// --template=./.maintain/bridge-weight-template.hbs
#![allow(clippy::all)]
#![allow(unused_parens)]
#![allow(unused_imports)]
#![allow(missing_docs)]
use frame_support::{
traits::Get,
weights::{constants::RocksDbWeight, Weight},
};
use sp_std::marker::PhantomData;
/// Weight functions needed for pallet_bridge_grandpa.
pub trait WeightInfo {
/// Weight of the `submit_finality_proof` call, parameterized by the number of
/// precommits (`p`) and votes-ancestries headers (`v`) in the justification.
fn submit_finality_proof(p: u32, v: u32) -> Weight;
}
/// Weights for `pallet_bridge_grandpa` that are generated using one of the Bridge testnets.
///
/// Those weights are test only and must never be used in production.
pub struct BridgeWeight<T>(PhantomData<T>);
// NOTE: autogenerated by the benchmark CLI (see file header) — do not edit by hand.
// Database costs come from the runtime's `T::DbWeight`.
impl<T: frame_system::Config> WeightInfo for BridgeWeight<T> {
/// Storage: BridgeUnknownGrandpa PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1),
/// added: 496, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa RequestCount (r:1 w:1)
///
/// Proof: BridgeUnknownGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added:
/// 499, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa BestFinalized (r:1 w:1)
///
/// Proof: BridgeUnknownGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added:
/// 531, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa CurrentAuthoritySet (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209),
/// added: 704, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHashesPointer (r:1 w:1)
///
/// Proof: BridgeUnknownGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4),
/// added: 499, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHashes (r:1 w:1)
///
/// Proof: BridgeUnknownGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36),
/// added: 2016, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:0 w:2)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// The range of component `p` is `[1, 4]`.
///
/// The range of component `v` is `[50, 100]`.
fn submit_finality_proof(p: u32, v: u32) -> Weight {
// Proof Size summary in bytes:
// Measured: `394 + p * (60 ±0)`
// Estimated: `4745`
// Minimum execution time: 228_072 nanoseconds.
Weight::from_parts(57_853_228, 4745)
// Standard Error: 149_421
.saturating_add(Weight::from_parts(36_708_702, 0).saturating_mul(p.into()))
// Standard Error: 10_625
.saturating_add(Weight::from_parts(1_469_032, 0).saturating_mul(v.into()))
.saturating_add(T::DbWeight::get().reads(6_u64))
.saturating_add(T::DbWeight::get().writes(6_u64))
}
}
// For backwards compatibility and tests
// NOTE: identical benchmarked figures to `BridgeWeight`, but using the
// hard-coded `RocksDbWeight` instead of the runtime's `T::DbWeight`.
impl WeightInfo for () {
/// Storage: BridgeUnknownGrandpa PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1),
/// added: 496, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa RequestCount (r:1 w:1)
///
/// Proof: BridgeUnknownGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added:
/// 499, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa BestFinalized (r:1 w:1)
///
/// Proof: BridgeUnknownGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added:
/// 531, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa CurrentAuthoritySet (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209),
/// added: 704, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHashesPointer (r:1 w:1)
///
/// Proof: BridgeUnknownGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4),
/// added: 499, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHashes (r:1 w:1)
///
/// Proof: BridgeUnknownGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36),
/// added: 2016, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:0 w:2)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// The range of component `p` is `[1, 4]`.
///
/// The range of component `v` is `[50, 100]`.
fn submit_finality_proof(p: u32, v: u32) -> Weight {
// Proof Size summary in bytes:
// Measured: `394 + p * (60 ±0)`
// Estimated: `4745`
// Minimum execution time: 228_072 nanoseconds.
Weight::from_parts(57_853_228, 4745)
// Standard Error: 149_421
.saturating_add(Weight::from_parts(36_708_702, 0).saturating_mul(p.into()))
// Standard Error: 10_625
.saturating_add(Weight::from_parts(1_469_032, 0).saturating_mul(v.into()))
.saturating_add(RocksDbWeight::get().reads(6_u64))
.saturating_add(RocksDbWeight::get().writes(6_u64))
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Weight-related utilities.
use crate::weights::{BridgeWeight, WeightInfo};
use frame_support::weights::Weight;
/// Extended weight info.
pub trait WeightInfoExt: WeightInfo {
    // The runtime is assumed to declare special signed extensions that:
    //
    // 1) boost priority of `submit_finality_proof` transactions;
    //
    // 2) slash the relayer if he submits an invalid transaction.
    //
    // Those extensions read and update storage values of other pallets
    // (`pallet-bridge-relayers` and the balances/assets pallet), so their cost must be
    // added to the weight of our call — hence the two methods below.

    /// Extra weight that is added to the `submit_finality_proof` call weight by signed extensions
    /// that are declared at runtime level.
    fn submit_finality_proof_overhead_from_runtime() -> Weight;

    // Functions that are directly mapped to extrinsics weights.

    /// Weight of message delivery extrinsic.
    fn submit_finality_proof_weight(precommits_len: u32, votes_ancestries_len: u32) -> Weight {
        Self::submit_finality_proof(precommits_len, votes_ancestries_len)
            .saturating_add(Self::submit_finality_proof_overhead_from_runtime())
    }
}
impl<T: frame_system::Config> WeightInfoExt for BridgeWeight<T> {
// the testnet weights assume no runtime-level signed extension overhead
fn submit_finality_proof_overhead_from_runtime() -> Weight {
Weight::zero()
}
}
impl WeightInfoExt for () {
// the test/no-op weights assume no runtime-level signed extension overhead
fn submit_finality_proof_overhead_from_runtime() -> Weight {
Weight::zero()
}
}
[package]
name = "pallet-bridge-messages"
description = "Module that allows bridged chains to exchange messages using lane concept."
version = "0.7.0"
authors.workspace = true
edition.workspace = true
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
[lints]
workspace = true
[dependencies]
codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false }
log = { workspace = true }
num-traits = { version = "0.2", default-features = false }
scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
# Bridge dependencies
bp-messages = { path = "../../primitives/messages", default-features = false }
bp-runtime = { path = "../../primitives/runtime", default-features = false }
# Substrate Dependencies
frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false, optional = true }
frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
[dev-dependencies]
bp-test-utils = { path = "../../primitives/test-utils" }
pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" }
sp-io = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" }
[features]
default = ["std"]
std = [
"bp-messages/std",
"bp-runtime/std",
"codec/std",
"frame-benchmarking/std",
"frame-support/std",
"frame-system/std",
"log/std",
"num-traits/std",
"scale-info/std",
"sp-runtime/std",
"sp-std/std",
]
runtime-benchmarks = [
"frame-benchmarking/runtime-benchmarks",
"frame-support/runtime-benchmarks",
"frame-system/runtime-benchmarks",
"pallet-balances/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
]
try-runtime = [
"frame-support/try-runtime",
"frame-system/try-runtime",
"pallet-balances/try-runtime",
"sp-runtime/try-runtime",
]
# Bridge Messages Pallet
The messages pallet is used to deliver messages from source chain to target chain. Message is (almost) opaque to the
module and the final goal is to hand message to the message dispatch mechanism.
## Contents
- [Overview](#overview)
- [Message Workflow](#message-workflow)
- [Integrating Messages Module into Runtime](#integrating-messages-module-into-runtime)
- [Non-Essential Functionality](#non-essential-functionality)
- [Weights of Module Extrinsics](#weights-of-module-extrinsics)
## Overview
Message lane is a unidirectional channel, where messages are sent from source chain to the target chain. At the same
time, a single instance of messages module supports both outbound lanes and inbound lanes. So the chain where the module
is deployed (this chain), may act as a source chain for outbound messages (heading to a bridged chain) and as a target
chain for inbound messages (coming from a bridged chain).
Messages module supports multiple message lanes. Every message lane is identified with a 4-byte identifier. Messages
sent through the lane are assigned unique (for this lane) increasing integer value that is known as nonce ("number that
can only be used once"). Messages that are sent over the same lane are guaranteed to be delivered to the target chain in
the same order they're sent from the source chain. In other words, message with nonce `N` will be delivered right before
delivering a message with nonce `N+1`.
Single message lane may be seen as a transport channel for single application (onchain, offchain or mixed). At the same
time the module itself never dictates any lane or message rules. In the end, it is the runtime developer who defines
what message lane and message mean for this runtime.
In our [Kusama<>Polkadot bridge](../../docs/polkadot-kusama-bridge-overview.md) we are using lane as a channel of
communication between two parachains of different relay chains. For example, lane `[0, 0, 0, 0]` is used for Polkadot <>
Kusama Asset Hub communications. Other lanes may be used to bridge other parachains.
## Message Workflow
The pallet is not intended to be used by end users and provides no public calls to send the message. Instead, it
provides runtime-internal method that allows other pallets (or other runtime code) to queue outbound messages.
The message "appears" when some runtime code calls the `send_message()` method of the pallet. The submitter specifies
the lane that they're willing to use and the message itself. If some fee must be paid for sending the message, it must
be paid outside of the pallet. If a message passes all checks (that include, for example, message size check, disabled
lane check, ...), the nonce is assigned and the message is stored in the module storage. The message is in an
"undelivered" state now.
We assume that there are external, offchain actors, called relayers, that are submitting module related transactions to
both target and source chains. The pallet itself has no assumptions about relayers incentivization scheme, but it has
some callbacks for paying rewards. See [Integrating Messages Module into
runtime](#Integrating-Messages-Module-into-runtime) for details.
Eventually, some relayer would notice this message in the "undelivered" state and it would decide to deliver this
message. Relayer then crafts `receive_messages_proof()` transaction (aka delivery transaction) for the messages module
instance, deployed at the target chain. Relayer provides its account id at the source chain, the proof of message (or
several messages), the number of messages in the transaction and their cumulative dispatch weight. Once a transaction is
mined, the message is considered "delivered".
Once a message is delivered, the relayer may want to confirm delivery back to the source chain. There are two reasons
why it would want to do that. The first is that we intentionally limit number of "delivered", but not yet "confirmed"
messages at inbound lanes (see [What about other Constants in the Messages Module Configuration
Trait](#What-about-other-Constants-in-the-Messages-Module-Configuration-Trait) for explanation). So at some point, the
target chain may stop accepting new messages until relayers confirm some of these. The second is that if the relayer
wants to be rewarded for delivery, it must prove the fact that it has actually delivered the message. And this proof may
only be generated after the delivery transaction is mined. So relayer crafts the `receive_messages_delivery_proof()`
transaction (aka confirmation transaction) for the messages module instance, deployed at the source chain. Once this
transaction is mined, the message is considered "confirmed".
The "confirmed" state is the final state of the message. But there's one last thing related to the message - the fact
that it is now "confirmed" and reward has been paid to the relayer (or at least callback for this has been called), must
be confirmed to the target chain. Otherwise, we may reach the limit of "unconfirmed" messages at the target chain and it
will stop accepting new messages. So relayer sometimes includes a nonce of the latest "confirmed" message in the next
`receive_messages_proof()` transaction, proving that some messages have been confirmed.
## Integrating Messages Module into Runtime
As it has been said above, the messages module supports both outbound and inbound message lanes. So if we will integrate
a module in some runtime, it may act as the source chain runtime for outbound messages and as the target chain runtime
for inbound messages. In this section, we'll sometimes refer to the chain we're currently integrating with, as "this
chain" and the other chain as "bridged chain".
Messages module doesn't simply accept transactions that are claiming that the bridged chain has some updated data for
us. Instead of this, the module assumes that the bridged chain is able to prove those data updates in some way. The proof
is abstracted from the module and may be of any kind. In our Substrate-to-Substrate bridge we're using runtime storage
proofs. Other bridges may use transaction proofs, Substrate header digests or anything else that may be proved.
**IMPORTANT NOTE**: everything below in this chapter describes details of the messages module configuration. But if
you're interested in well-probed and relatively easy integration of two Substrate-based chains, you may want to look at
the [bridge-runtime-common](../../bin/runtime-common/) crate. This crate is providing a lot of helpers for integration,
which may be directly used from within your runtime. Then if you'll decide to change something in this scheme, get back
here for detailed information.
### General Information
The messages module supports instances. Every module instance is supposed to bridge this chain and some bridged chain.
To bridge with another chain, using another instance is suggested (this isn't forced anywhere in the code, though). Keep
in mind, that the pallet may be used to build virtual channels between multiple chains, as we do in our [Polkadot <>
Kusama bridge](../../docs/polkadot-kusama-bridge-overview.md). There, the pallet actually bridges only two parachains -
Kusama Bridge Hub and Polkadot Bridge Hub. However, other Kusama and Polkadot parachains are able to send (XCM) messages
to their Bridge Hubs. The messages will be delivered to the other side of the bridge and routed to the proper
destination parachain within the bridged chain consensus.
Message submitters may track message progress by inspecting module events. When a message is accepted, the
`MessageAccepted` event is emitted. The event contains both message lane identifier and nonce that has been assigned to
the message. When a message is delivered to the target chain, the `MessagesDelivered` event is emitted from the
`receive_messages_delivery_proof()` transaction. The `MessagesDelivered` contains the message lane identifier and
inclusive range of delivered message nonces.
The pallet provides no means to get the result of message dispatch at the target chain. If that is required, it must be
done outside of the pallet. For example, XCM messages, when dispatched, have special instructions to send some data back
to the sender. Other dispatchers may use similar mechanism for that.
### How to plug-in Messages Module to Send Messages to the Bridged Chain?
The `pallet_bridge_messages::Config` trait has 2 main associated types that are used to work with outbound messages. The
`pallet_bridge_messages::Config::TargetHeaderChain` defines how we see the bridged chain as the target for our outbound
messages. It must be able to check that the bridged chain may accept our message - like that the message has size below
maximal possible transaction size of the chain and so on. And when the relayer sends us a confirmation transaction, this
implementation must be able to parse and verify the proof of messages delivery. Normally, you would reuse the same
(configurable) type on all chains that are sending messages to the same bridged chain.
The last type is the `pallet_bridge_messages::Config::DeliveryConfirmationPayments`. When confirmation
transaction is received, we call the `pay_reward()` method, passing the range of delivered messages.
You may use the [`pallet-bridge-relayers`](../relayers/) pallet and its
[`DeliveryConfirmationPaymentsAdapter`](../relayers/src/payment_adapter.rs) adapter as a possible
implementation. It allows you to pay fixed reward for relaying the message and some of its portion
for confirming delivery.
### I have a Messages Module in my Runtime, but I Want to Reject all Outbound Messages. What shall I do?
You should be looking at the `bp_messages::source_chain::ForbidOutboundMessages` structure
[`bp_messages::source_chain`](../../primitives/messages/src/source_chain.rs). It implements all required traits and will
simply reject all transactions, related to outbound messages.
### How to plug-in Messages Module to Receive Messages from the Bridged Chain?
The `pallet_bridge_messages::Config` trait has 2 main associated types that are used to work with inbound messages. The
`pallet_bridge_messages::Config::SourceHeaderChain` defines how we see the bridged chain as the source of our inbound
messages. When relayer sends us a delivery transaction, this implementation must be able to parse and verify the proof
of messages wrapped in this transaction. Normally, you would reuse the same (configurable) type on all chains that are
sending messages to the same bridged chain.
The `pallet_bridge_messages::Config::MessageDispatch` defines a way on how to dispatch delivered messages. Apart from
actually dispatching the message, the implementation must return the correct dispatch weight of the message before
dispatch is called.
### I have a Messages Module in my Runtime, but I Want to Reject all Inbound Messages. What shall I do?
You should be looking at the `bp_messages::target_chain::ForbidInboundMessages` structure from the
[`bp_messages::target_chain`](../../primitives/messages/src/target_chain.rs) module. It implements all required traits
and will simply reject all transactions, related to inbound messages.
### What about other Constants in the Messages Module Configuration Trait?
Two settings that are used to check messages in the `send_message()` function. The
`pallet_bridge_messages::Config::ActiveOutboundLanes` is an array of all message lanes, that may be used to send
messages. All messages sent using other lanes are rejected. All messages that have size above
`pallet_bridge_messages::Config::MaximalOutboundPayloadSize` will also be rejected.
To be able to reward the relayer for delivering messages, we store a map of message nonces range => identifier of the
relayer that has delivered this range at the target chain runtime storage. If a relayer delivers multiple consequent
ranges, they're merged into single entry. So there may be more than one entry for the same relayer. Eventually, this
whole map must be delivered back to the source chain to confirm delivery and pay rewards. So to make sure we are able to
craft this confirmation transaction, we need to: (1) keep the size of this map below a certain limit and (2) make sure
that the weight of processing this map is below a certain limit. Both size and processing weight mostly depend on the
number of entries. The number of entries is limited with the
`pallet_bridge_messages::Config::MaxUnrewardedRelayerEntriesAtInboundLane` parameter. Processing weight also depends on
the total number of messages that are being confirmed, because every confirmed message needs to be read. So there's
another `pallet_bridge_messages::Config::MaxUnconfirmedMessagesAtInboundLane` parameter for that.
When choosing values for these parameters, you must also keep in mind that if proof in your scheme is based on finality
of headers (and it is the most obvious option for Substrate-based chains with finality notion), then choosing too small
values for these parameters may cause significant delays in message delivery. That's because there are too many actors
involved in this scheme: 1) authorities that are finalizing headers of the target chain need to finalize header with
non-empty map; 2) the headers relayer then needs to submit this header and its finality proof to the source chain; 3)
the messages relayer must then send confirmation transaction (storage proof of this map) to the source chain; 4) when
the confirmation transaction will be mined at some header, source chain authorities must finalize this header; 5) the
headers relay then needs to submit this header and its finality proof to the target chain; 6) only now the messages
relayer may submit new messages from the source to target chain and prune the entry from the map.
Delivery transaction requires the relayer to provide both number of entries and total number of messages in the map.
This means that the module never charges an extra cost for delivering a map - the relayer would need to pay exactly for
the number of entries+messages it has delivered. So the best guess for values of these parameters would be the pair that
would occupy `N` percent of the maximal transaction size and weight of the source chain. The `N` should be large enough
to process large maps, at the same time keeping reserve for future source chain upgrades.
## Non-Essential Functionality
There may be a special account in every runtime where the messages module is deployed. This account, named 'module
owner', is like a module-level sudo account - he's able to halt and resume all module operations without requiring
runtime upgrade. Calls that are related to this account are:
- `fn set_owner()`: current module owner may call it to transfer "ownership" to another account;
- `fn halt_operations()`: the module owner (or sudo account) may call this function to stop all module operations. After
this call, all message-related transactions will be rejected until further `resume_operations` call. This call may be
used when something extraordinary happens with the bridge;
- `fn resume_operations()`: module owner may call this function to resume bridge operations. The module will resume its
regular operations after this call.
If pallet owner is not defined, the governance may be used to make those calls.
## Messages Relay
We have an offchain actor, who is watching for new messages and submits them to the bridged chain. It is the messages
relay - you may look at the [crate level documentation and the code](../../relays/messages/).
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Messages pallet benchmarking.
use crate::{
inbound_lane::InboundLaneStorage, outbound_lane, weights_ext::EXPECTED_DEFAULT_MESSAGE_LENGTH,
Call, OutboundLanes, RuntimeInboundLaneStorage,
};
use bp_messages::{
source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, DeliveredMessages,
InboundLaneData, LaneId, MessageNonce, OutboundLaneData, UnrewardedRelayer,
UnrewardedRelayersState,
};
use bp_runtime::StorageProofSize;
use codec::Decode;
use frame_benchmarking::{account, benchmarks_instance_pallet};
use frame_support::weights::Weight;
use frame_system::RawOrigin;
use sp_runtime::{traits::TrailingZeroInput, BoundedVec};
use sp_std::{ops::RangeInclusive, prelude::*};
/// Seed used by the `account` function when deriving benchmark accounts.
const SEED: u32 = 0;
/// Pallet we're benchmarking here.
pub struct Pallet<T: Config<I>, I: 'static = ()>(crate::Pallet<T, I>);
/// Benchmark-specific message proof parameters.
#[derive(Debug)]
pub struct MessageProofParams {
	/// Id of the lane.
	pub lane: LaneId,
	/// Range of messages to include in the proof.
	pub message_nonces: RangeInclusive<MessageNonce>,
	/// If `Some`, the proof needs to include this outbound lane data.
	pub outbound_lane_data: Option<OutboundLaneData>,
	/// If `true`, the caller expects that the proof will contain correct messages that will
	/// be successfully dispatched. This flag is only set by the "optional"
	/// `receive_single_message_proof_with_dispatch` benchmark. If you don't need it, just
	/// return `true` from `is_message_successfully_dispatched`.
	pub is_successful_dispatch_expected: bool,
	/// Proof size requirements.
	pub size: StorageProofSize,
}
/// Benchmark-specific message delivery proof parameters.
///
/// `ThisChainAccountId` is the account id type of this chain (the relayer entries inside
/// the inbound lane data are keyed by it).
#[derive(Debug)]
pub struct MessageDeliveryProofParams<ThisChainAccountId> {
	/// Id of the lane.
	pub lane: LaneId,
	/// The proof needs to include this inbound lane data.
	pub inbound_lane_data: InboundLaneData<ThisChainAccountId>,
	/// Proof size requirements.
	pub size: StorageProofSize,
}
/// Trait that must be implemented by runtime.
pub trait Config<I: 'static>: crate::Config<I> {
	/// Lane id to use in benchmarks.
	///
	/// By default, lane `[0, 0, 0, 0]` is used.
	fn bench_lane_id() -> LaneId {
		LaneId([0, 0, 0, 0])
	}
	/// Return id of relayer account at the bridged chain.
	///
	/// By default, the zero account (decoded from an all-zeroes byte stream) is returned.
	fn bridged_relayer_id() -> Self::InboundRelayer {
		Self::InboundRelayer::decode(&mut TrailingZeroInput::zeroes()).unwrap()
	}
	/// Create given account and give it enough balance for test purposes. Used to create
	/// relayer account at the target chain. Is strictly necessary when your rewards scheme
	/// assumes that the relayer account must exist.
	///
	/// Does nothing by default.
	fn endow_account(_account: &Self::AccountId) {}
	/// Prepare messages proof to receive by the module.
	///
	/// Returns the proof together with the cumulative dispatch weight of the proved messages.
	fn prepare_message_proof(
		params: MessageProofParams,
	) -> (<Self::SourceHeaderChain as SourceHeaderChain>::MessagesProof, Weight);
	/// Prepare messages delivery proof to receive by the module.
	fn prepare_message_delivery_proof(
		params: MessageDeliveryProofParams<Self::AccountId>,
	) -> <Self::TargetHeaderChain as TargetHeaderChain<Self::OutboundPayload, Self::AccountId>>::MessagesDeliveryProof;
	/// Returns `true` if the message with given nonce has been successfully dispatched.
	fn is_message_successfully_dispatched(_nonce: MessageNonce) -> bool {
		true
	}
	/// Returns true if given relayer has been rewarded for some of its actions.
	fn is_relayer_rewarded(relayer: &Self::AccountId) -> bool;
}
benchmarks_instance_pallet! {
//
// Benchmarks that are used directly by the runtime calls weight formulae.
//
// Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions:
// * proof does not include outbound lane state proof;
// * inbound lane already has state, so it needs to be read and decoded;
// * message is dispatched (reminder: dispatch weight should be minimal);
// * message requires all heavy checks done by dispatcher.
//
// This is base benchmark for all other message delivery benchmarks.
receive_single_message_proof {
let relayer_id_on_source = T::bridged_relayer_id();
let relayer_id_on_target = account("relayer", 0, SEED);
T::endow_account(&relayer_id_on_target);
// mark messages 1..=20 as delivered
receive_messages::<T, I>(20);
let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams {
lane: T::bench_lane_id(),
message_nonces: 21..=21,
outbound_lane_data: None,
is_successful_dispatch_expected: false,
size: StorageProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH),
});
}: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight)
verify {
assert_eq!(
crate::InboundLanes::<T, I>::get(&T::bench_lane_id()).last_delivered_nonce(),
21,
);
}
// Benchmark `receive_messages_proof` extrinsic with two minimal-weight messages and following conditions:
// * proof does not include outbound lane state proof;
// * inbound lane already has state, so it needs to be read and decoded;
// * message is dispatched (reminder: dispatch weight should be minimal);
// * message requires all heavy checks done by dispatcher.
//
// The weight of single message delivery could be approximated as
// `weight(receive_two_messages_proof) - weight(receive_single_message_proof)`.
// This won't be super-accurate if message has non-zero dispatch weight, but estimation should
// be close enough to real weight.
receive_two_messages_proof {
let relayer_id_on_source = T::bridged_relayer_id();
let relayer_id_on_target = account("relayer", 0, SEED);
T::endow_account(&relayer_id_on_target);
// mark messages 1..=20 as delivered
receive_messages::<T, I>(20);
let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams {
lane: T::bench_lane_id(),
message_nonces: 21..=22,
outbound_lane_data: None,
is_successful_dispatch_expected: false,
size: StorageProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH),
});
}: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 2, dispatch_weight)
verify {
assert_eq!(
crate::InboundLanes::<T, I>::get(&T::bench_lane_id()).last_delivered_nonce(),
22,
);
}
// Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions:
// * proof includes outbound lane state proof;
// * inbound lane already has state, so it needs to be read and decoded;
// * message is successfully dispatched (reminder: dispatch weight should be minimal);
// * message requires all heavy checks done by dispatcher.
//
// The weight of outbound lane state delivery would be
// `weight(receive_single_message_proof_with_outbound_lane_state) - weight(receive_single_message_proof)`.
// This won't be super-accurate if message has non-zero dispatch weight, but estimation should
// be close enough to real weight.
receive_single_message_proof_with_outbound_lane_state {
let relayer_id_on_source = T::bridged_relayer_id();
let relayer_id_on_target = account("relayer", 0, SEED);
T::endow_account(&relayer_id_on_target);
// mark messages 1..=20 as delivered
receive_messages::<T, I>(20);
let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams {
lane: T::bench_lane_id(),
message_nonces: 21..=21,
outbound_lane_data: Some(OutboundLaneData {
oldest_unpruned_nonce: 21,
latest_received_nonce: 20,
latest_generated_nonce: 21,
}),
is_successful_dispatch_expected: false,
size: StorageProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH),
});
}: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight)
verify {
let lane_state = crate::InboundLanes::<T, I>::get(&T::bench_lane_id());
assert_eq!(lane_state.last_delivered_nonce(), 21);
assert_eq!(lane_state.last_confirmed_nonce, 20);
}
// Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions:
// * the proof has large leaf with total size of approximately 1KB;
// * proof does not include outbound lane state proof;
// * inbound lane already has state, so it needs to be read and decoded;
// * message is dispatched (reminder: dispatch weight should be minimal);
// * message requires all heavy checks done by dispatcher.
//
// With single KB of messages proof, the weight of the call is increased (roughly) by
// `(receive_single_message_proof_16KB - receive_single_message_proof_1_kb) / 15`.
receive_single_message_proof_1_kb {
let relayer_id_on_source = T::bridged_relayer_id();
let relayer_id_on_target = account("relayer", 0, SEED);
T::endow_account(&relayer_id_on_target);
// mark messages 1..=20 as delivered
receive_messages::<T, I>(20);
let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams {
lane: T::bench_lane_id(),
message_nonces: 21..=21,
outbound_lane_data: None,
is_successful_dispatch_expected: false,
size: StorageProofSize::HasLargeLeaf(1024),
});
}: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight)
verify {
assert_eq!(
crate::InboundLanes::<T, I>::get(&T::bench_lane_id()).last_delivered_nonce(),
21,
);
}
// Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions:
// * the proof has large leaf with total size of approximately 16KB;
// * proof does not include outbound lane state proof;
// * inbound lane already has state, so it needs to be read and decoded;
// * message is dispatched (reminder: dispatch weight should be minimal);
// * message requires all heavy checks done by dispatcher.
//
// Size of proof grows because it contains extra trie nodes in it.
//
// With single KB of messages proof, the weight of the call is increased (roughly) by
// `(receive_single_message_proof_16KB - receive_single_message_proof) / 15`.
receive_single_message_proof_16_kb {
let relayer_id_on_source = T::bridged_relayer_id();
let relayer_id_on_target = account("relayer", 0, SEED);
T::endow_account(&relayer_id_on_target);
// mark messages 1..=20 as delivered
receive_messages::<T, I>(20);
let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams {
lane: T::bench_lane_id(),
message_nonces: 21..=21,
outbound_lane_data: None,
is_successful_dispatch_expected: false,
size: StorageProofSize::HasLargeLeaf(16 * 1024),
});
}: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight)
verify {
assert_eq!(
crate::InboundLanes::<T, I>::get(&T::bench_lane_id()).last_delivered_nonce(),
21,
);
}
// Benchmark `receive_messages_delivery_proof` extrinsic with following conditions:
// * single relayer is rewarded for relaying single message;
// * relayer account does not exist (in practice it needs to exist in production environment).
//
// This is base benchmark for all other confirmations delivery benchmarks.
receive_delivery_proof_for_single_message {
let relayer_id: T::AccountId = account("relayer", 0, SEED);
// send message that we're going to confirm
send_regular_message::<T, I>();
let relayers_state = UnrewardedRelayersState {
unrewarded_relayer_entries: 1,
messages_in_oldest_entry: 1,
total_messages: 1,
last_delivered_nonce: 1,
};
let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams {
lane: T::bench_lane_id(),
inbound_lane_data: InboundLaneData {
relayers: vec![UnrewardedRelayer {
relayer: relayer_id.clone(),
messages: DeliveredMessages::new(1),
}].into_iter().collect(),
last_confirmed_nonce: 0,
},
size: StorageProofSize::Minimal(0),
});
}: receive_messages_delivery_proof(RawOrigin::Signed(relayer_id.clone()), proof, relayers_state)
verify {
assert_eq!(OutboundLanes::<T, I>::get(T::bench_lane_id()).latest_received_nonce, 1);
assert!(T::is_relayer_rewarded(&relayer_id));
}
// Benchmark `receive_messages_delivery_proof` extrinsic with following conditions:
// * single relayer is rewarded for relaying two messages;
// * relayer account does not exist (in practice it needs to exist in production environment).
//
// Additional weight for paying single-message reward to the same relayer could be computed
// as `weight(receive_delivery_proof_for_two_messages_by_single_relayer)
// - weight(receive_delivery_proof_for_single_message)`.
receive_delivery_proof_for_two_messages_by_single_relayer {
let relayer_id: T::AccountId = account("relayer", 0, SEED);
// send message that we're going to confirm
send_regular_message::<T, I>();
send_regular_message::<T, I>();
let relayers_state = UnrewardedRelayersState {
unrewarded_relayer_entries: 1,
messages_in_oldest_entry: 2,
total_messages: 2,
last_delivered_nonce: 2,
};
let mut delivered_messages = DeliveredMessages::new(1);
delivered_messages.note_dispatched_message();
let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams {
lane: T::bench_lane_id(),
inbound_lane_data: InboundLaneData {
relayers: vec![UnrewardedRelayer {
relayer: relayer_id.clone(),
messages: delivered_messages,
}].into_iter().collect(),
last_confirmed_nonce: 0,
},
size: StorageProofSize::Minimal(0),
});
}: receive_messages_delivery_proof(RawOrigin::Signed(relayer_id.clone()), proof, relayers_state)
verify {
assert_eq!(OutboundLanes::<T, I>::get(T::bench_lane_id()).latest_received_nonce, 2);
assert!(T::is_relayer_rewarded(&relayer_id));
}
// Benchmark `receive_messages_delivery_proof` extrinsic with following conditions:
// * two relayers are rewarded for relaying single message each;
// * relayer account does not exist (in practice it needs to exist in production environment).
//
// Additional weight for paying reward to the next relayer could be computed
// as `weight(receive_delivery_proof_for_two_messages_by_two_relayers)
// - weight(receive_delivery_proof_for_two_messages_by_single_relayer)`.
receive_delivery_proof_for_two_messages_by_two_relayers {
let relayer1_id: T::AccountId = account("relayer1", 1, SEED);
let relayer2_id: T::AccountId = account("relayer2", 2, SEED);
// send message that we're going to confirm
send_regular_message::<T, I>();
send_regular_message::<T, I>();
let relayers_state = UnrewardedRelayersState {
unrewarded_relayer_entries: 2,
messages_in_oldest_entry: 1,
total_messages: 2,
last_delivered_nonce: 2,
};
let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams {
lane: T::bench_lane_id(),
inbound_lane_data: InboundLaneData {
relayers: vec![
UnrewardedRelayer {
relayer: relayer1_id.clone(),
messages: DeliveredMessages::new(1),
},
UnrewardedRelayer {
relayer: relayer2_id.clone(),
messages: DeliveredMessages::new(2),
},
].into_iter().collect(),
last_confirmed_nonce: 0,
},
size: StorageProofSize::Minimal(0),
});
}: receive_messages_delivery_proof(RawOrigin::Signed(relayer1_id.clone()), proof, relayers_state)
verify {
assert_eq!(OutboundLanes::<T, I>::get(T::bench_lane_id()).latest_received_nonce, 2);
assert!(T::is_relayer_rewarded(&relayer1_id));
assert!(T::is_relayer_rewarded(&relayer2_id));
}
//
// Benchmarks that the runtime developers may use for proper pallet configuration.
//
// This benchmark is optional and may be used when the runtime developer needs a way to compute
// message dispatch weight. In this case, they need to provide messages that can go through the
// whole dispatch routine.
//
// Benchmark `receive_messages_proof` extrinsic with single message and following conditions:
//
// * proof does not include outbound lane state proof;
// * inbound lane already has state, so it needs to be read and decoded;
// * message is **SUCCESSFULLY** dispatched;
// * message requires all heavy checks done by dispatcher.
receive_single_message_proof_with_dispatch {
// maybe dispatch weight relies on the message size too?
let i in EXPECTED_DEFAULT_MESSAGE_LENGTH .. EXPECTED_DEFAULT_MESSAGE_LENGTH * 16;
let relayer_id_on_source = T::bridged_relayer_id();
let relayer_id_on_target = account("relayer", 0, SEED);
T::endow_account(&relayer_id_on_target);
// mark messages 1..=20 as delivered
receive_messages::<T, I>(20);
let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams {
lane: T::bench_lane_id(),
message_nonces: 21..=21,
outbound_lane_data: None,
is_successful_dispatch_expected: true,
size: StorageProofSize::Minimal(i),
});
}: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight)
verify {
assert_eq!(
crate::InboundLanes::<T, I>::get(&T::bench_lane_id()).last_delivered_nonce(),
21,
);
assert!(T::is_message_successfully_dispatched(21));
}
impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::TestRuntime)
}
/// Enqueue a single empty message on the benchmark lane.
///
/// Used by delivery-confirmation benchmarks to create outbound messages that are later
/// confirmed via `receive_messages_delivery_proof`.
fn send_regular_message<T: Config<I>, I: 'static>() {
	let mut lane = outbound_lane::<T, I>(T::bench_lane_id());
	lane.send_message(BoundedVec::try_from(Vec::new()).expect("We craft valid messages"));
}
/// Mark messages `1..=nonce` as delivered on the benchmark inbound lane.
///
/// All delivered messages are attributed to the bridged relayer as a single unrewarded
/// relayers entry; nothing is confirmed yet (`last_confirmed_nonce` stays zero).
fn receive_messages<T: Config<I>, I: 'static>(nonce: MessageNonce) {
	let mut storage = RuntimeInboundLaneStorage::<T, I>::from_lane_id(T::bench_lane_id());
	let delivered_by_bridged_relayer = UnrewardedRelayer {
		relayer: T::bridged_relayer_id(),
		messages: DeliveredMessages::new(nonce),
	};
	storage.set_data(InboundLaneData {
		relayers: vec![delivered_by_bridged_relayer].into_iter().collect(),
		last_confirmed_nonce: 0,
	});
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Everything about incoming messages receipt.
use crate::Config;
use bp_messages::{
target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch},
DeliveredMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData,
ReceivalResult, UnrewardedRelayer,
};
use codec::{Decode, Encode, EncodeLike, MaxEncodedLen};
use frame_support::traits::Get;
use scale_info::{Type, TypeInfo};
use sp_runtime::RuntimeDebug;
use sp_std::prelude::PartialEq;
/// Inbound lane storage.
///
/// Abstracts the runtime storage used by an inbound lane, so that the lane logic in
/// `InboundLane` is independent of the concrete storage scheme.
pub trait InboundLaneStorage {
	/// Id of relayer on source chain.
	type Relayer: Clone + PartialEq;
	/// Lane id.
	fn id(&self) -> LaneId;
	/// Return maximal number of unrewarded relayer entries in inbound lane.
	fn max_unrewarded_relayer_entries(&self) -> MessageNonce;
	/// Return maximal number of unconfirmed messages in inbound lane.
	fn max_unconfirmed_messages(&self) -> MessageNonce;
	/// Get lane data from the storage.
	///
	/// NOTE(review): the name suggests implementations fall back to a default (initial)
	/// value when nothing is stored yet — confirm against the concrete implementation.
	fn get_or_init_data(&mut self) -> InboundLaneData<Self::Relayer>;
	/// Update lane data in the storage.
	fn set_data(&mut self, data: InboundLaneData<Self::Relayer>);
}
/// Inbound lane data wrapper that implements `MaxEncodedLen`.
///
/// We have already had `MaxEncodedLen`-like functionality before, but its usage has
/// been localized and we haven't been passing bounds (maximal count of unrewarded relayer entries,
/// maximal count of unconfirmed messages) everywhere. This wrapper allows us to avoid passing
/// these generic bounds all over the code.
///
/// The encoding of this type matches the encoding of the wrapped `InboundLaneData`: it is a
/// single-field newtype, so the derived `Encode`/`Decode` are transparent (see also the
/// `EncodeLike` impl below).
#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)]
pub struct StoredInboundLaneData<T: Config<I>, I: 'static>(pub InboundLaneData<T::InboundRelayer>);
// Transparent read access to the wrapped `InboundLaneData`.
impl<T: Config<I>, I: 'static> sp_std::ops::Deref for StoredInboundLaneData<T, I> {
	type Target = InboundLaneData<T::InboundRelayer>;

	fn deref(&self) -> &Self::Target {
		&self.0
	}
}
// Transparent write access to the wrapped `InboundLaneData`.
impl<T: Config<I>, I: 'static> sp_std::ops::DerefMut for StoredInboundLaneData<T, I> {
	fn deref_mut(&mut self) -> &mut Self::Target {
		&mut self.0
	}
}
impl<T: Config<I>, I: 'static> Default for StoredInboundLaneData<T, I> {
fn default() -> Self {
StoredInboundLaneData(Default::default())
}
}
impl<T: Config<I>, I: 'static> From<StoredInboundLaneData<T, I>>
for InboundLaneData<T::InboundRelayer>
{
fn from(data: StoredInboundLaneData<T, I>) -> Self {
data.0
}
}
// A plain `InboundLaneData` value may be stored anywhere a `StoredInboundLaneData` is
// expected: the newtype wrapper is encoding-transparent (derived `Encode` of a single-field
// tuple struct encodes just the inner field).
impl<T: Config<I>, I: 'static> EncodeLike<StoredInboundLaneData<T, I>>
	for InboundLaneData<T::InboundRelayer>
{
}
impl<T: Config<I>, I: 'static> TypeInfo for StoredInboundLaneData<T, I> {
	type Identity = Self;

	// For metadata purposes the wrapper is indistinguishable from the wrapped
	// `InboundLaneData`, matching the transparent encoding above.
	fn type_info() -> Type {
		InboundLaneData::<T::InboundRelayer>::type_info()
	}
}
impl<T: Config<I>, I: 'static> MaxEncodedLen for StoredInboundLaneData<T, I> {
	fn max_encoded_len() -> usize {
		// The upper bound is derived from the configured maximal number of unrewarded
		// relayer entries. If the hint cannot be computed, fall back to `usize::MAX`
		// (effectively "unbounded").
		InboundLaneData::<T::InboundRelayer>::encoded_size_hint(
			T::MaxUnrewardedRelayerEntriesAtInboundLane::get() as usize,
		)
		.unwrap_or(usize::MAX)
	}
}
/// Inbound messages lane.
///
/// Implements the receiving side of the lane protocol on top of an abstract
/// `InboundLaneStorage`.
pub struct InboundLane<S> {
	// backing storage of this lane
	storage: S,
}
impl<S: InboundLaneStorage> InboundLane<S> {
	/// Create new inbound lane backed by given storage.
	pub fn new(storage: S) -> Self {
		InboundLane { storage }
	}

	/// Returns `mut` storage reference.
	pub fn storage_mut(&mut self) -> &mut S {
		&mut self.storage
	}

	/// Receive state of the corresponding outbound lane.
	///
	/// Confirms delivery of messages up to `outbound_lane_data.latest_received_nonce` and
	/// prunes the unrewarded relayers set accordingly. Returns the new confirmed nonce, or
	/// `None` if the update is obsolete or inconsistent.
	pub fn receive_state_update(
		&mut self,
		outbound_lane_data: OutboundLaneData,
	) -> Option<MessageNonce> {
		let mut data = self.storage.get_or_init_data();
		let last_delivered_nonce = data.last_delivered_nonce();

		// the bridged chain cannot have received a nonce that we have never delivered
		if outbound_lane_data.latest_received_nonce > last_delivered_nonce {
			// this is something that should never happen if proofs are correct
			return None
		}
		// ignore updates that don't move the confirmed nonce forward
		if outbound_lane_data.latest_received_nonce <= data.last_confirmed_nonce {
			return None
		}

		let new_confirmed_nonce = outbound_lane_data.latest_received_nonce;
		data.last_confirmed_nonce = new_confirmed_nonce;
		// Firstly, remove all of the records where higher nonce <= new confirmed nonce
		while data
			.relayers
			.front()
			.map(|entry| entry.messages.end <= new_confirmed_nonce)
			.unwrap_or(false)
		{
			data.relayers.pop_front();
		}
		// Secondly, update the next record with lower nonce equal to new confirmed nonce if needed.
		// Note: There will be max. 1 record to update as we don't allow messages from relayers to
		// overlap.
		match data.relayers.front_mut() {
			Some(entry) if entry.messages.begin <= new_confirmed_nonce => {
				entry.messages.begin = new_confirmed_nonce + 1;
			},
			_ => {},
		}

		self.storage.set_data(data);
		Some(outbound_lane_data.latest_received_nonce)
	}

	/// Receive new message.
	///
	/// Verifies the nonce and the lane limits, dispatches the message and records the
	/// relayer in the unrewarded relayers set.
	pub fn receive_message<Dispatch: MessageDispatch>(
		&mut self,
		relayer_at_bridged_chain: &S::Relayer,
		nonce: MessageNonce,
		message_data: DispatchMessageData<Dispatch::DispatchPayload>,
	) -> ReceivalResult<Dispatch::DispatchLevelResult> {
		let mut data = self.storage.get_or_init_data();
		// messages are processed strictly in order: the only acceptable nonce is
		// `last_delivered_nonce + 1` (`checked_add` also guards against nonce overflow)
		if Some(nonce) != data.last_delivered_nonce().checked_add(1) {
			return ReceivalResult::InvalidNonce
		}

		// if there are more unrewarded relayer entries than we may accept, reject this message
		if data.relayers.len() as MessageNonce >= self.storage.max_unrewarded_relayer_entries() {
			return ReceivalResult::TooManyUnrewardedRelayers
		}

		// if there are more unconfirmed messages than we may accept, reject this message
		let unconfirmed_messages_count = nonce.saturating_sub(data.last_confirmed_nonce);
		if unconfirmed_messages_count > self.storage.max_unconfirmed_messages() {
			return ReceivalResult::TooManyUnconfirmedMessages
		}

		// then, dispatch message
		let dispatch_result = Dispatch::dispatch(DispatchMessage {
			key: MessageKey { lane_id: self.storage.id(), nonce },
			data: message_data,
		});

		// now let's update inbound lane storage
		match data.relayers.back_mut() {
			// consecutive message from the same relayer: extend its existing entry
			Some(entry) if entry.relayer == *relayer_at_bridged_chain => {
				entry.messages.note_dispatched_message();
			},
			// otherwise start a new unrewarded relayer entry
			_ => {
				data.relayers.push_back(UnrewardedRelayer {
					relayer: relayer_at_bridged_chain.clone(),
					messages: DeliveredMessages::new(nonce),
				});
			},
		};
		self.storage.set_data(data);

		ReceivalResult::Dispatched(dispatch_result)
	}
}
#[cfg(test)]
mod tests {
	use super::*;
	use crate::{
		inbound_lane,
		mock::{
			dispatch_result, inbound_message_data, inbound_unrewarded_relayers_state, run_test,
			unrewarded_relayer, TestMessageDispatch, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID,
			TEST_RELAYER_A, TEST_RELAYER_B, TEST_RELAYER_C,
		},
		RuntimeInboundLaneStorage,
	};
	use bp_messages::UnrewardedRelayersState;

	/// Deliver message `nonce` from `TEST_RELAYER_A`, asserting that dispatch succeeds.
	fn receive_regular_message(
		lane: &mut InboundLane<RuntimeInboundLaneStorage<TestRuntime, ()>>,
		nonce: MessageNonce,
	) {
		assert_eq!(
			lane.receive_message::<TestMessageDispatch>(
				&TEST_RELAYER_A,
				nonce,
				inbound_message_data(REGULAR_PAYLOAD)
			),
			ReceivalResult::Dispatched(dispatch_result(0))
		);
	}

	#[test]
	fn receive_status_update_ignores_status_from_the_future() {
		run_test(|| {
			let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
			receive_regular_message(&mut lane, 1);
			// confirmation nonce (10) is above our last delivered nonce (1) => must be ignored
			assert_eq!(
				lane.receive_state_update(OutboundLaneData {
					latest_received_nonce: 10,
					..Default::default()
				}),
				None,
			);

			assert_eq!(lane.storage.get_or_init_data().last_confirmed_nonce, 0);
		});
	}

	#[test]
	fn receive_status_update_ignores_obsolete_status() {
		run_test(|| {
			let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
			receive_regular_message(&mut lane, 1);
			receive_regular_message(&mut lane, 2);
			receive_regular_message(&mut lane, 3);
			assert_eq!(
				lane.receive_state_update(OutboundLaneData {
					latest_received_nonce: 3,
					..Default::default()
				}),
				Some(3),
			);
			assert_eq!(lane.storage.get_or_init_data().last_confirmed_nonce, 3);

			// receiving the same state update twice is a no-op
			assert_eq!(
				lane.receive_state_update(OutboundLaneData {
					latest_received_nonce: 3,
					..Default::default()
				}),
				None,
			);
			assert_eq!(lane.storage.get_or_init_data().last_confirmed_nonce, 3);
		});
	}

	#[test]
	fn receive_status_update_works() {
		run_test(|| {
			let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
			receive_regular_message(&mut lane, 1);
			receive_regular_message(&mut lane, 2);
			receive_regular_message(&mut lane, 3);
			assert_eq!(lane.storage.get_or_init_data().last_confirmed_nonce, 0);
			assert_eq!(
				lane.storage.get_or_init_data().relayers,
				vec![unrewarded_relayer(1, 3, TEST_RELAYER_A)]
			);

			// partial confirmation (2 of 3) shrinks the relayer entry instead of removing it
			assert_eq!(
				lane.receive_state_update(OutboundLaneData {
					latest_received_nonce: 2,
					..Default::default()
				}),
				Some(2),
			);
			assert_eq!(lane.storage.get_or_init_data().last_confirmed_nonce, 2);
			assert_eq!(
				lane.storage.get_or_init_data().relayers,
				vec![unrewarded_relayer(3, 3, TEST_RELAYER_A)]
			);

			// confirming the remaining message removes the entry entirely
			assert_eq!(
				lane.receive_state_update(OutboundLaneData {
					latest_received_nonce: 3,
					..Default::default()
				}),
				Some(3),
			);
			assert_eq!(lane.storage.get_or_init_data().last_confirmed_nonce, 3);
			assert_eq!(lane.storage.get_or_init_data().relayers, vec![]);
		});
	}

	#[test]
	fn receive_status_update_works_with_batches_from_relayers() {
		run_test(|| {
			let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
			let mut seed_storage_data = lane.storage.get_or_init_data();
			// Prepare data
			seed_storage_data.last_confirmed_nonce = 0;
			seed_storage_data.relayers.push_back(unrewarded_relayer(1, 1, TEST_RELAYER_A));
			// Simulate messages batch (2, 3, 4) from relayer #2
			seed_storage_data.relayers.push_back(unrewarded_relayer(2, 4, TEST_RELAYER_B));
			seed_storage_data.relayers.push_back(unrewarded_relayer(5, 5, TEST_RELAYER_C));
			lane.storage.set_data(seed_storage_data);
			// Check
			assert_eq!(
				lane.receive_state_update(OutboundLaneData {
					latest_received_nonce: 3,
					..Default::default()
				}),
				Some(3),
			);
			assert_eq!(lane.storage.get_or_init_data().last_confirmed_nonce, 3);
			// relayer #1 entry is fully pruned, relayer #2 entry is shrunk to just message 4
			assert_eq!(
				lane.storage.get_or_init_data().relayers,
				vec![
					unrewarded_relayer(4, 4, TEST_RELAYER_B),
					unrewarded_relayer(5, 5, TEST_RELAYER_C)
				]
			);
		});
	}

	#[test]
	fn fails_to_receive_message_with_incorrect_nonce() {
		run_test(|| {
			let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
			assert_eq!(
				lane.receive_message::<TestMessageDispatch>(
					&TEST_RELAYER_A,
					10,
					inbound_message_data(REGULAR_PAYLOAD)
				),
				ReceivalResult::InvalidNonce
			);
			assert_eq!(lane.storage.get_or_init_data().last_delivered_nonce(), 0);
		});
	}

	#[test]
	fn fails_to_receive_messages_above_unrewarded_relayer_entries_limit_per_lane() {
		run_test(|| {
			let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
			let max_nonce =
				<TestRuntime as Config>::MaxUnrewardedRelayerEntriesAtInboundLane::get();
			// fill the unrewarded relayers set: every message is delivered by a different
			// relayer, so every message creates a new entry
			for current_nonce in 1..max_nonce + 1 {
				assert_eq!(
					lane.receive_message::<TestMessageDispatch>(
						&(TEST_RELAYER_A + current_nonce),
						current_nonce,
						inbound_message_data(REGULAR_PAYLOAD)
					),
					ReceivalResult::Dispatched(dispatch_result(0))
				);
			}
			// Fails to dispatch new message from different than latest relayer.
			assert_eq!(
				lane.receive_message::<TestMessageDispatch>(
					&(TEST_RELAYER_A + max_nonce + 1),
					max_nonce + 1,
					inbound_message_data(REGULAR_PAYLOAD)
				),
				ReceivalResult::TooManyUnrewardedRelayers,
			);
			// Fails to dispatch new messages from latest relayer. Prevents griefing attacks.
			assert_eq!(
				lane.receive_message::<TestMessageDispatch>(
					&(TEST_RELAYER_A + max_nonce),
					max_nonce + 1,
					inbound_message_data(REGULAR_PAYLOAD)
				),
				ReceivalResult::TooManyUnrewardedRelayers,
			);
		});
	}

	#[test]
	fn fails_to_receive_messages_above_unconfirmed_messages_limit_per_lane() {
		run_test(|| {
			let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
			let max_nonce = <TestRuntime as Config>::MaxUnconfirmedMessagesAtInboundLane::get();
			for current_nonce in 1..=max_nonce {
				assert_eq!(
					lane.receive_message::<TestMessageDispatch>(
						&TEST_RELAYER_A,
						current_nonce,
						inbound_message_data(REGULAR_PAYLOAD)
					),
					ReceivalResult::Dispatched(dispatch_result(0))
				);
			}
			// Fails to dispatch new message from different than latest relayer.
			assert_eq!(
				lane.receive_message::<TestMessageDispatch>(
					&TEST_RELAYER_B,
					max_nonce + 1,
					inbound_message_data(REGULAR_PAYLOAD)
				),
				ReceivalResult::TooManyUnconfirmedMessages,
			);
			// Fails to dispatch new messages from latest relayer.
			assert_eq!(
				lane.receive_message::<TestMessageDispatch>(
					&TEST_RELAYER_A,
					max_nonce + 1,
					inbound_message_data(REGULAR_PAYLOAD)
				),
				ReceivalResult::TooManyUnconfirmedMessages,
			);
		});
	}

	#[test]
	fn correctly_receives_following_messages_from_two_relayers_alternately() {
		run_test(|| {
			let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
			assert_eq!(
				lane.receive_message::<TestMessageDispatch>(
					&TEST_RELAYER_A,
					1,
					inbound_message_data(REGULAR_PAYLOAD)
				),
				ReceivalResult::Dispatched(dispatch_result(0))
			);
			assert_eq!(
				lane.receive_message::<TestMessageDispatch>(
					&TEST_RELAYER_B,
					2,
					inbound_message_data(REGULAR_PAYLOAD)
				),
				ReceivalResult::Dispatched(dispatch_result(0))
			);
			assert_eq!(
				lane.receive_message::<TestMessageDispatch>(
					&TEST_RELAYER_A,
					3,
					inbound_message_data(REGULAR_PAYLOAD)
				),
				ReceivalResult::Dispatched(dispatch_result(0))
			);
			// the same relayer account may occupy several (non-consecutive) entries
			assert_eq!(
				lane.storage.get_or_init_data().relayers,
				vec![
					unrewarded_relayer(1, 1, TEST_RELAYER_A),
					unrewarded_relayer(2, 2, TEST_RELAYER_B),
					unrewarded_relayer(3, 3, TEST_RELAYER_A)
				]
			);
		});
	}

	#[test]
	fn rejects_same_message_from_two_different_relayers() {
		run_test(|| {
			let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
			assert_eq!(
				lane.receive_message::<TestMessageDispatch>(
					&TEST_RELAYER_A,
					1,
					inbound_message_data(REGULAR_PAYLOAD)
				),
				ReceivalResult::Dispatched(dispatch_result(0))
			);
			assert_eq!(
				lane.receive_message::<TestMessageDispatch>(
					&TEST_RELAYER_B,
					1,
					inbound_message_data(REGULAR_PAYLOAD)
				),
				ReceivalResult::InvalidNonce,
			);
		});
	}

	#[test]
	fn correct_message_is_processed_instantly() {
		run_test(|| {
			let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
			receive_regular_message(&mut lane, 1);
			assert_eq!(lane.storage.get_or_init_data().last_delivered_nonce(), 1);
		});
	}

	#[test]
	fn unspent_weight_is_returned_by_receive_message() {
		run_test(|| {
			let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
			let mut payload = REGULAR_PAYLOAD;
			*payload.dispatch_result.unspent_weight.ref_time_mut() = 1;
			assert_eq!(
				lane.receive_message::<TestMessageDispatch>(
					&TEST_RELAYER_A,
					1,
					inbound_message_data(payload)
				),
				ReceivalResult::Dispatched(dispatch_result(1))
			);
		});
	}

	#[test]
	fn first_message_is_confirmed_correctly() {
		run_test(|| {
			let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
			receive_regular_message(&mut lane, 1);
			receive_regular_message(&mut lane, 2);
			assert_eq!(
				lane.receive_state_update(OutboundLaneData {
					latest_received_nonce: 1,
					..Default::default()
				}),
				Some(1),
			);
			assert_eq!(
				inbound_unrewarded_relayers_state(TEST_LANE_ID),
				UnrewardedRelayersState {
					unrewarded_relayer_entries: 1,
					messages_in_oldest_entry: 1,
					total_messages: 1,
					last_delivered_nonce: 2,
				},
			);
		});
	}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Runtime module that allows sending and receiving messages using lane concept:
//!
//! 1) the message is sent using `send_message()` call;
//! 2) every outbound message is assigned nonce;
//! 3) the messages are stored in the storage;
//! 4) external component (relay) delivers messages to bridged chain;
//! 5) messages are processed in order (ordered by assigned nonce);
//! 6) relay may send proof-of-delivery back to this chain.
//!
//! Once message is sent, its progress can be tracked by looking at module events.
//! The assigned nonce is reported using `MessageAccepted` event. When message is
//! delivered to the bridged chain, it is reported using `MessagesDelivered` event.
//!
//! **IMPORTANT NOTE**: after generating weights (custom `WeightInfo` implementation) for
//! your runtime (where this module is plugged to), please add test for these weights.
//! The test should call the `ensure_weights_are_correct` function from this module.
//! If this test fails with your weights, then either weights are computed incorrectly,
//! or some benchmarks assumptions are broken for your runtime.
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
pub use inbound_lane::StoredInboundLaneData;
pub use outbound_lane::StoredMessagePayload;
pub use weights::WeightInfo;
pub use weights_ext::{
ensure_able_to_receive_confirmation, ensure_able_to_receive_message,
ensure_weights_are_correct, WeightInfoExt, EXPECTED_DEFAULT_MESSAGE_LENGTH,
EXTRA_STORAGE_PROOF_SIZE,
};
use crate::{
inbound_lane::{InboundLane, InboundLaneStorage},
outbound_lane::{OutboundLane, OutboundLaneStorage, ReceivalConfirmationError},
};
use bp_messages::{
source_chain::{
DeliveryConfirmationPayments, OnMessagesDelivered, SendMessageArtifacts, TargetHeaderChain,
},
target_chain::{
DeliveryPayments, DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages,
SourceHeaderChain,
},
DeliveredMessages, InboundLaneData, InboundMessageDetails, LaneId, MessageKey, MessageNonce,
MessagePayload, MessagesOperatingMode, OutboundLaneData, OutboundMessageDetails,
UnrewardedRelayersState, VerificationError,
};
use bp_runtime::{
BasicOperatingMode, ChainId, OwnedBridgeModule, PreComputedSize, RangeInclusiveExt, Size,
};
use codec::{Decode, Encode, MaxEncodedLen};
use frame_support::{dispatch::PostDispatchInfo, ensure, fail, traits::Get, DefaultNoBound};
use sp_runtime::traits::UniqueSaturatedFrom;
use sp_std::{marker::PhantomData, prelude::*};
mod inbound_lane;
mod outbound_lane;
mod weights_ext;
pub mod weights;
#[cfg(feature = "runtime-benchmarks")]
pub mod benchmarking;
#[cfg(test)]
mod mock;
pub use pallet::*;
/// The target that will be used when publishing logs related to this pallet.
///
/// Useful for filtering this pallet's entries in the runtime log output.
pub const LOG_TARGET: &str = "runtime::bridge-messages";
#[frame_support::pallet]
pub mod pallet {
use super::*;
use bp_messages::{ReceivalResult, ReceivedMessages};
use bp_runtime::RangeInclusiveExt;
use frame_support::pallet_prelude::*;
use frame_system::pallet_prelude::*;
/// The module configuration trait.
#[pallet::config]
pub trait Config<I: 'static = ()>: frame_system::Config {
	// General types

	/// The overarching event type.
	type RuntimeEvent: From<Event<Self, I>>
		+ IsType<<Self as frame_system::Config>::RuntimeEvent>;
	/// Benchmarks results from runtime we're plugged into.
	type WeightInfo: WeightInfoExt;

	/// Gets the chain id value from the instance.
	#[pallet::constant]
	type BridgedChainId: Get<ChainId>;
	/// Get all active outbound lanes that the message pallet is serving.
	type ActiveOutboundLanes: Get<&'static [LaneId]>;
	/// Maximal number of unrewarded relayer entries at inbound lane. Unrewarded means that the
	/// relayer has delivered messages, but either confirmations haven't been delivered back to
	/// the source chain, or we haven't received reward confirmations yet.
	///
	/// This constant limits maximal number of entries in the `InboundLaneData::relayers`. Keep
	/// in mind that the same relayer account may take several (non-consecutive) entries in this
	/// set.
	type MaxUnrewardedRelayerEntriesAtInboundLane: Get<MessageNonce>;
	/// Maximal number of unconfirmed messages at inbound lane. Unconfirmed means that the
	/// message has been delivered, but either confirmations haven't been delivered back to the
	/// source chain, or we haven't received reward confirmations for these messages yet.
	///
	/// This constant limits difference between last message from last entry of the
	/// `InboundLaneData::relayers` and first message at the first entry.
	///
	/// There is no point in making this parameter lesser than
	/// `MaxUnrewardedRelayerEntriesAtInboundLane`, because then maximal number of relayer
	/// entries will be limited by maximal number of messages.
	///
	/// This value also represents maximal number of messages in single delivery transaction.
	/// Transaction that is declaring more messages than this value, will be rejected. Even if
	/// these messages are from different lanes.
	type MaxUnconfirmedMessagesAtInboundLane: Get<MessageNonce>;

	/// Maximal encoded size of the outbound payload.
	#[pallet::constant]
	type MaximalOutboundPayloadSize: Get<u32>;
	/// Payload type of outbound messages. This payload is dispatched on the bridged chain.
	type OutboundPayload: Parameter + Size;

	/// Payload type of inbound messages. This payload is dispatched on this chain.
	type InboundPayload: Decode;
	/// Identifier of relayer that deliver messages to this chain. Relayer reward is paid on the
	/// bridged chain.
	type InboundRelayer: Parameter + MaxEncodedLen;
	/// Delivery payments.
	type DeliveryPayments: DeliveryPayments<Self::AccountId>;

	// Types that are used by outbound_lane (on source chain).

	/// Target header chain.
	type TargetHeaderChain: TargetHeaderChain<Self::OutboundPayload, Self::AccountId>;
	/// Delivery confirmation payments.
	type DeliveryConfirmationPayments: DeliveryConfirmationPayments<Self::AccountId>;
	/// Delivery confirmation callback.
	type OnMessagesDelivered: OnMessagesDelivered;

	// Types that are used by inbound_lane (on target chain).

	/// Source header chain, as it is represented on target chain.
	type SourceHeaderChain: SourceHeaderChain;
	/// Message dispatch.
	type MessageDispatch: MessageDispatch<DispatchPayload = Self::InboundPayload>;
}
/// Shortcut to messages proof type of the configured `SourceHeaderChain`.
pub type MessagesProofOf<T, I> =
	<<T as Config<I>>::SourceHeaderChain as SourceHeaderChain>::MessagesProof;
/// Shortcut to messages delivery proof type of the configured `TargetHeaderChain`.
pub type MessagesDeliveryProofOf<T, I> =
	<<T as Config<I>>::TargetHeaderChain as TargetHeaderChain<
		<T as Config<I>>::OutboundPayload,
		<T as frame_system::Config>::AccountId,
	>>::MessagesDeliveryProof;
/// The pallet struct, implementing the bridge messages module.
#[pallet::pallet]
pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);
// Wire the pallet into the common owned-bridge-module machinery: the owner (or root) may
// change the operating mode via `set_owner`/`set_operating_mode` calls below.
impl<T: Config<I>, I: 'static> OwnedBridgeModule<T> for Pallet<T, I> {
	const LOG_TARGET: &'static str = LOG_TARGET;
	type OwnerStorage = PalletOwner<T, I>;
	type OperatingMode = MessagesOperatingMode;
	type OperatingModeStorage = PalletOperatingMode<T, I>;
}
#[pallet::hooks]
impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I>
where
	u32: TryFrom<BlockNumberFor<T>>,
{
	/// Prune confirmed messages of one of the active outbound lanes, using (a part of) the
	/// remaining block weight. Returns the weight actually used.
	fn on_idle(_block: BlockNumberFor<T>, remaining_weight: Weight) -> Weight {
		// we'll need at least to read outbound lane state, kill a message and update lane state
		let db_weight = T::DbWeight::get();
		if !remaining_weight.all_gte(db_weight.reads_writes(1, 2)) {
			return Weight::zero()
		}

		// messages from lane with index `i` in `ActiveOutboundLanes` are pruned when
		// `System::block_number() % lanes.len() == i`. Otherwise we need to read lane states on
		// every block, wasting the whole `remaining_weight` for nothing and causing starvation
		// of the last lane pruning
		let active_lanes = T::ActiveOutboundLanes::get();
		// guard against the `% 0` panic (and subsequent out-of-bounds indexing) below when the
		// pallet is configured without any active outbound lanes
		if active_lanes.is_empty() {
			return Weight::zero()
		}
		let active_lanes_len = (active_lanes.len() as u32).into();
		let active_lane_index = u32::unique_saturated_from(
			frame_system::Pallet::<T>::block_number() % active_lanes_len,
		);
		let active_lane_id = active_lanes[active_lane_index as usize];

		// first db read - outbound lane state
		let mut active_lane = outbound_lane::<T, I>(active_lane_id);
		let mut used_weight = db_weight.reads(1);
		// and here we'll have writes
		used_weight += active_lane.prune_messages(db_weight, remaining_weight - used_weight);

		// we already checked we have enough `remaining_weight` to cover this `used_weight`
		used_weight
	}
}
#[pallet::call]
impl<T: Config<I>, I: 'static> Pallet<T, I> {
/// Change `PalletOwner`.
///
/// May only be called either by root, or by `PalletOwner`.
#[pallet::call_index(0)]
#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
pub fn set_owner(origin: OriginFor<T>, new_owner: Option<T::AccountId>) -> DispatchResult {
<Self as OwnedBridgeModule<_>>::set_owner(origin, new_owner)
}
/// Halt or resume all/some pallet operations.
///
/// May only be called either by root, or by `PalletOwner`.
#[pallet::call_index(1)]
#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
pub fn set_operating_mode(
origin: OriginFor<T>,
operating_mode: MessagesOperatingMode,
) -> DispatchResult {
<Self as OwnedBridgeModule<_>>::set_operating_mode(origin, operating_mode)
}
/// Receive messages proof from bridged chain.
///
/// The weight of the call assumes that the transaction always brings outbound lane
/// state update. Because of that, the submitter (relayer) has no benefit of not including
/// this data in the transaction, so reward confirmations lags should be minimal.
///
/// The call fails if:
///
/// - the pallet is halted;
///
/// - the call origin is not `Signed(_)`;
///
/// - there are too many messages in the proof;
///
/// - the proof verification procedure returns an error - e.g. because header used to craft
/// proof is not imported by the associated finality pallet;
///
/// - the `dispatch_weight` argument is not sufficient to dispatch all bundled messages.
///
/// The call may succeed, but some messages may not be delivered e.g. if they are not fit
/// into the unrewarded relayers vector.
#[pallet::call_index(2)]
#[pallet::weight(T::WeightInfo::receive_messages_proof_weight(proof, *messages_count, *dispatch_weight))]
pub fn receive_messages_proof(
origin: OriginFor<T>,
relayer_id_at_bridged_chain: T::InboundRelayer,
proof: MessagesProofOf<T, I>,
messages_count: u32,
dispatch_weight: Weight,
) -> DispatchResultWithPostInfo {
Self::ensure_not_halted().map_err(Error::<T, I>::BridgeModule)?;
let relayer_id_at_this_chain = ensure_signed(origin)?;
// reject transactions that are declaring too many messages
ensure!(
MessageNonce::from(messages_count) <= T::MaxUnconfirmedMessagesAtInboundLane::get(),
Error::<T, I>::TooManyMessagesInTheProof
);
// if message dispatcher is currently inactive, we won't accept any messages
ensure!(T::MessageDispatch::is_active(), Error::<T, I>::MessageDispatchInactive);
// why do we need to know the weight of this (`receive_messages_proof`) call? Because
// we may want to return some funds for not-dispatching (or partially dispatching) some
// messages to the call origin (relayer). And this is done by returning actual weight
// from the call. But we only know dispatch weight of every messages. So to refund
// relayer because we have not dispatched Message, we need to:
//
// ActualWeight = DeclaredWeight - Message.DispatchWeight
//
// The DeclaredWeight is exactly what's computed here. Unfortunately it is impossible
// to get pre-computed value (and it has been already computed by the executive).
let declared_weight = T::WeightInfo::receive_messages_proof_weight(
&proof,
messages_count,
dispatch_weight,
);
let mut actual_weight = declared_weight;
// verify messages proof && convert proof into messages
let messages = verify_and_decode_messages_proof::<
T::SourceHeaderChain,
T::InboundPayload,
>(proof, messages_count)
.map_err(|err| {
log::trace!(target: LOG_TARGET, "Rejecting invalid messages proof: {:?}", err,);
Error::<T, I>::InvalidMessagesProof
})?;
// dispatch messages and (optionally) update lane(s) state(s)
let mut total_messages = 0;
let mut valid_messages = 0;
let mut messages_received_status = Vec::with_capacity(messages.len());
let mut dispatch_weight_left = dispatch_weight;
for (lane_id, lane_data) in messages {
let mut lane = inbound_lane::<T, I>(lane_id);
// subtract extra storage proof bytes from the actual PoV size - there may be
// less unrewarded relayers than the maximal configured value
let lane_extra_proof_size_bytes = lane.storage_mut().extra_proof_size_bytes();
actual_weight = actual_weight.set_proof_size(
actual_weight.proof_size().saturating_sub(lane_extra_proof_size_bytes),
);
if let Some(lane_state) = lane_data.lane_state {
let updated_latest_confirmed_nonce = lane.receive_state_update(lane_state);
if let Some(updated_latest_confirmed_nonce) = updated_latest_confirmed_nonce {
log::trace!(
target: LOG_TARGET,
"Received lane {:?} state update: latest_confirmed_nonce={}. Unrewarded relayers: {:?}",
lane_id,
updated_latest_confirmed_nonce,
UnrewardedRelayersState::from(&lane.storage_mut().get_or_init_data()),
);
}
}
let mut lane_messages_received_status =
ReceivedMessages::new(lane_id, Vec::with_capacity(lane_data.messages.len()));
for mut message in lane_data.messages {
debug_assert_eq!(message.key.lane_id, lane_id);
total_messages += 1;
// ensure that relayer has declared enough weight for dispatching next message
// on this lane. We can't dispatch lane messages out-of-order, so if declared
// weight is not enough, let's move to next lane
let message_dispatch_weight = T::MessageDispatch::dispatch_weight(&mut message);
if message_dispatch_weight.any_gt(dispatch_weight_left) {
log::trace!(
target: LOG_TARGET,
"Cannot dispatch any more messages on lane {:?}. Weight: declared={}, left={}",
lane_id,
message_dispatch_weight,
dispatch_weight_left,
);
fail!(Error::<T, I>::InsufficientDispatchWeight);
}
let receival_result = lane.receive_message::<T::MessageDispatch>(
&relayer_id_at_bridged_chain,
message.key.nonce,
message.data,
);
// note that we're returning unspent weight to relayer even if message has been
// rejected by the lane. This allows relayers to submit spam transactions with
// e.g. the same set of already delivered messages over and over again, without
// losing funds for messages dispatch. But keep in mind that relayer pays base
// delivery transaction cost anyway. And base cost covers everything except
// dispatch, so we have a balance here.
let unspent_weight = match &receival_result {
ReceivalResult::Dispatched(dispatch_result) => {
valid_messages += 1;
dispatch_result.unspent_weight
},
ReceivalResult::InvalidNonce |
ReceivalResult::TooManyUnrewardedRelayers |
ReceivalResult::TooManyUnconfirmedMessages => message_dispatch_weight,
};
lane_messages_received_status.push(message.key.nonce, receival_result);
let unspent_weight = unspent_weight.min(message_dispatch_weight);
dispatch_weight_left -= message_dispatch_weight - unspent_weight;
actual_weight = actual_weight.saturating_sub(unspent_weight);
}
messages_received_status.push(lane_messages_received_status);
}
// let's now deal with relayer payments
T::DeliveryPayments::pay_reward(
relayer_id_at_this_chain,
total_messages,
valid_messages,
actual_weight,
);
log::debug!(
target: LOG_TARGET,
"Received messages: total={}, valid={}. Weight used: {}/{}.",
total_messages,
valid_messages,
actual_weight,
declared_weight,
);
Self::deposit_event(Event::MessagesReceived(messages_received_status));
Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes })
}
/// Receive messages delivery proof from bridged chain.
///
/// The proof is verified by `T::TargetHeaderChain` and, if valid, confirms delivery of
/// previously sent messages: the outbound lane state is updated, confirmation relayers
/// are rewarded and the `MessagesDelivered` event is deposited. The returned
/// `PostDispatchInfo` carries the actual weight, recomputed from the proof size and the
/// (possibly reduced) relayers state.
#[pallet::call_index(3)]
#[pallet::weight(T::WeightInfo::receive_messages_delivery_proof_weight(
proof,
relayers_state,
))]
pub fn receive_messages_delivery_proof(
origin: OriginFor<T>,
proof: MessagesDeliveryProofOf<T, I>,
mut relayers_state: UnrewardedRelayersState,
) -> DispatchResultWithPostInfo {
// reject the call right away if the pallet is halted
Self::ensure_not_halted().map_err(Error::<T, I>::BridgeModule)?;
// remember proof size before the proof is consumed by the verifier - it is used
// to compute the actual weight at the end of the call
let proof_size = proof.size();
let confirmation_relayer = ensure_signed(origin)?;
let (lane_id, lane_data) = T::TargetHeaderChain::verify_messages_delivery_proof(proof)
.map_err(|err| {
log::trace!(
target: LOG_TARGET,
"Rejecting invalid messages delivery proof: {:?}",
err,
);
Error::<T, I>::InvalidMessagesDeliveryProof
})?;
// the state declared by the relayer must match the state from the proof
ensure!(
relayers_state.is_valid(&lane_data),
Error::<T, I>::InvalidUnrewardedRelayersState
);
// mark messages as delivered
let mut lane = outbound_lane::<T, I>(lane_id);
let last_delivered_nonce = lane_data.last_delivered_nonce();
let confirmed_messages = lane
.confirm_delivery(
relayers_state.total_messages,
last_delivered_nonce,
&lane_data.relayers,
)
.map_err(Error::<T, I>::ReceivalConfirmation)?;
if let Some(confirmed_messages) = confirmed_messages {
// emit 'delivered' event
let received_range = confirmed_messages.begin..=confirmed_messages.end;
Self::deposit_event(Event::MessagesDelivered {
lane_id,
messages: confirmed_messages,
});
// if some new messages have been confirmed, reward relayers
let actually_rewarded_relayers = T::DeliveryConfirmationPayments::pay_reward(
lane_id,
lane_data.relayers,
&confirmation_relayer,
&received_range,
);
// update relayers state with actual numbers to compute actual weight below
relayers_state.unrewarded_relayer_entries = sp_std::cmp::min(
relayers_state.unrewarded_relayer_entries,
actually_rewarded_relayers,
);
relayers_state.total_messages = sp_std::cmp::min(
relayers_state.total_messages,
received_range.checked_len().unwrap_or(MessageNonce::MAX),
);
};
log::trace!(
target: LOG_TARGET,
"Received messages delivery proof up to (and including) {} at lane {:?}",
last_delivered_nonce,
lane_id,
);
// notify others about messages delivery
T::OnMessagesDelivered::on_messages_delivered(
lane_id,
lane.data().queued_messages().saturating_len(),
);
// because of lags, the inbound lane state (`lane_data`) may have entries for
// already rewarded relayers and messages (if all entries are duplicated, then
// this transaction must be filtered out by our signed extension)
let actual_weight = T::WeightInfo::receive_messages_delivery_proof_weight(
&PreComputedSize(proof_size as usize),
&relayers_state,
);
Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes })
}
}
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
/// Events deposited by this pallet (see the call and `MessagesBridge` implementations
/// for the exact points where each event is emitted).
pub enum Event<T: Config<I>, I: 'static = ()> {
/// Message has been accepted and is waiting to be delivered.
MessageAccepted {
/// Lane, which has accepted the message.
lane_id: LaneId,
/// Nonce of accepted message.
nonce: MessageNonce,
},
/// Messages have been received from the bridged chain.
MessagesReceived(
/// Result of received messages dispatch.
Vec<ReceivedMessages<<T::MessageDispatch as MessageDispatch>::DispatchLevelResult>>,
),
/// Messages in the inclusive range have been delivered to the bridged chain.
MessagesDelivered {
/// Lane for which the delivery has been confirmed.
lane_id: LaneId,
/// Delivered messages.
messages: DeliveredMessages,
},
}
#[pallet::error]
#[derive(PartialEq, Eq)]
/// Errors that may be returned by the pallet calls and by the `MessagesBridge`
/// implementation.
pub enum Error<T, I = ()> {
/// Pallet is not in Normal operating mode.
NotOperatingNormally,
/// The outbound lane is inactive.
InactiveOutboundLane,
/// The inbound message dispatcher is inactive.
MessageDispatchInactive,
/// Message has been treated as invalid by chain verifier.
MessageRejectedByChainVerifier(VerificationError),
/// Message has been treated as invalid by the pallet logic.
MessageRejectedByPallet(VerificationError),
/// Submitter has failed to pay fee for delivering and dispatching messages.
FailedToWithdrawMessageFee,
/// The transaction brings too many messages.
TooManyMessagesInTheProof,
/// Invalid messages has been submitted.
InvalidMessagesProof,
/// Invalid messages delivery proof has been submitted.
InvalidMessagesDeliveryProof,
/// The relayer has declared invalid unrewarded relayers state in the
/// `receive_messages_delivery_proof` call.
InvalidUnrewardedRelayersState,
/// The cumulative dispatch weight, passed by relayer is not enough to cover dispatch
/// of all bundled messages.
InsufficientDispatchWeight,
/// The message someone is trying to work with (i.e. increase fee) is not yet sent.
MessageIsNotYetSent,
/// Error confirming messages receival.
ReceivalConfirmation(ReceivalConfirmationError),
/// Error generated by the `OwnedBridgeModule` trait.
BridgeModule(bp_runtime::OwnedBridgeModuleError),
}
/// Optional pallet owner.
///
/// Pallet owner has a right to halt all pallet operations and then resume it. If it is
/// `None`, then there are no direct ways to halt/resume pallet operations, but other
/// runtime methods may still be used to do that (i.e. democracy::referendum to update halt
/// flag directly or call the `halt_operations`).
///
/// May be initialized at genesis via `GenesisConfig::owner`.
#[pallet::storage]
#[pallet::getter(fn module_owner)]
pub type PalletOwner<T: Config<I>, I: 'static = ()> = StorageValue<_, T::AccountId>;
/// The current operating mode of the pallet.
///
/// Depending on the mode either all, some, or no transactions will be allowed.
/// Initialized at genesis via `GenesisConfig::operating_mode`.
#[pallet::storage]
#[pallet::getter(fn operating_mode)]
pub type PalletOperatingMode<T: Config<I>, I: 'static = ()> =
StorageValue<_, MessagesOperatingMode, ValueQuery>;
/// Map of lane id => inbound lane data.
///
/// Values are wrapped into `StoredInboundLaneData`, which bounds the encoded size.
#[pallet::storage]
pub type InboundLanes<T: Config<I>, I: 'static = ()> =
StorageMap<_, Blake2_128Concat, LaneId, StoredInboundLaneData<T, I>, ValueQuery>;
/// Map of lane id => outbound lane data.
///
/// The maximal number of entries is bounded by the number of active outbound lanes
/// (see `MaybeOutboundLanesCount`).
#[pallet::storage]
pub type OutboundLanes<T: Config<I>, I: 'static = ()> = StorageMap<
Hasher = Blake2_128Concat,
Key = LaneId,
Value = OutboundLaneData,
QueryKind = ValueQuery,
OnEmpty = GetDefault,
MaxValues = MaybeOutboundLanesCount<T, I>,
>;
/// Map of lane id => is congested signal sent. It is managed by the
/// `bridge_runtime_common::LocalXcmQueueManager`.
///
/// Missing keys read as `false` (`ValueQuery` with the default `bool` value).
///
/// **bridges-v1**: this map is a temporary hack and will be dropped in the `v2`. We can emulate
/// a storage map using `sp_io::unhashed` storage functions, but then benchmarks are not
/// accounting its `proof_size`, so it is missing from the final weights. So we need to make it
/// a map inside some pallet. We could use a simply value instead of map here, because
/// in `v1` we'll only have a single lane. But in the case of adding another lane before `v2`,
/// it'll be easier to deal with the isolated storage map instead.
#[pallet::storage]
pub type OutboundLanesCongestedSignals<T: Config<I>, I: 'static = ()> = StorageMap<
Hasher = Blake2_128Concat,
Key = LaneId,
Value = bool,
QueryKind = ValueQuery,
OnEmpty = GetDefault,
MaxValues = MaybeOutboundLanesCount<T, I>,
>;
/// All queued outbound messages.
///
/// Keyed by `MessageKey` (lane id + nonce); values are bounded stored payloads.
#[pallet::storage]
pub type OutboundMessages<T: Config<I>, I: 'static = ()> =
StorageMap<_, Blake2_128Concat, MessageKey, StoredMessagePayload<T, I>>;
#[pallet::genesis_config]
#[derive(DefaultNoBound)]
/// Genesis configuration of the pallet; applied by its `BuildGenesisConfig`
/// implementation.
pub struct GenesisConfig<T: Config<I>, I: 'static = ()> {
/// Initial pallet operating mode.
pub operating_mode: MessagesOperatingMode,
/// Initial pallet owner.
pub owner: Option<T::AccountId>,
/// Dummy marker.
pub phantom: sp_std::marker::PhantomData<I>,
}
#[pallet::genesis_build]
impl<T: Config<I>, I: 'static> BuildGenesisConfig for GenesisConfig<T, I> {
	/// Write the configured operating mode and, when present, the pallet owner
	/// into their storage items.
	fn build(&self) {
		PalletOperatingMode::<T, I>::put(self.operating_mode);
		if let Some(owner) = &self.owner {
			PalletOwner::<T, I>::put(owner);
		}
	}
}
impl<T: Config<I>, I: 'static> Pallet<T, I> {
	/// Get stored data of the outbound message with given nonce.
	pub fn outbound_message_data(lane: LaneId, nonce: MessageNonce) -> Option<MessagePayload> {
		let key = MessageKey { lane_id: lane, nonce };
		OutboundMessages::<T, I>::get(key).map(Into::into)
	}
	/// Prepare data, related to given inbound message.
	pub fn inbound_message_data(
		lane: LaneId,
		payload: MessagePayload,
		outbound_details: OutboundMessageDetails,
	) -> InboundMessageDetails {
		// the dispatcher needs a `DispatchMessage` wrapper around the raw payload
		let key = MessageKey { lane_id: lane, nonce: outbound_details.nonce };
		let mut message = DispatchMessage { key, data: payload.into() };
		InboundMessageDetails {
			dispatch_weight: T::MessageDispatch::dispatch_weight(&mut message),
		}
	}
	/// Return outbound lane data.
	pub fn outbound_lane_data(lane: LaneId) -> OutboundLaneData {
		OutboundLanes::<T, I>::get(lane)
	}
	/// Return inbound lane data.
	pub fn inbound_lane_data(lane: LaneId) -> InboundLaneData<T::InboundRelayer> {
		InboundLanes::<T, I>::get(lane).0
	}
}
/// Get-parameter that returns number of active outbound lanes that the pallet maintains.
pub struct MaybeOutboundLanesCount<T, I>(PhantomData<(T, I)>);
impl<T: Config<I>, I: 'static> Get<Option<u32>> for MaybeOutboundLanesCount<T, I> {
	fn get() -> Option<u32> {
		// the set of active lanes is a compile-time constant of the runtime
		let active_lanes = T::ActiveOutboundLanes::get();
		Some(active_lanes.len() as u32)
	}
}
}
/// Structure, containing a validated message payload and all the info required
/// to send it on the bridge.
///
/// Produced by `validate_message` and consumed by `send_message`.
#[derive(Debug, PartialEq, Eq)]
pub struct SendMessageArgs<T: Config<I>, I: 'static> {
// lane that the message is sent over
lane_id: LaneId,
// encoded, size-checked message payload
payload: StoredMessagePayload<T, I>,
}
impl<T, I> bp_messages::source_chain::MessagesBridge<T::OutboundPayload> for Pallet<T, I>
where
T: Config<I>,
I: 'static,
{
type Error = Error<T, I>;
type SendMessageArgs = SendMessageArgs<T, I>;
/// Check that the message may be sent: pallet must operate normally, the lane must
/// be active, the target-chain verifier must accept the payload and its encoding
/// must fit into the `StoredMessagePayload` size bound.
fn validate_message(
lane: LaneId,
message: &T::OutboundPayload,
) -> Result<SendMessageArgs<T, I>, Self::Error> {
ensure_normal_operating_mode::<T, I>()?;
// let's check if outbound lane is active
ensure!(T::ActiveOutboundLanes::get().contains(&lane), Error::<T, I>::InactiveOutboundLane);
// let's first check if message can be delivered to target chain
T::TargetHeaderChain::verify_message(message).map_err(|err| {
log::trace!(
target: LOG_TARGET,
"Message to lane {:?} is rejected by target chain: {:?}",
lane,
err,
);
Error::<T, I>::MessageRejectedByChainVerifier(err)
})?;
Ok(SendMessageArgs {
lane_id: lane,
payload: StoredMessagePayload::<T, I>::try_from(message.encode()).map_err(|_| {
Error::<T, I>::MessageRejectedByPallet(VerificationError::MessageTooLarge)
})?,
})
}
/// Store the pre-validated message in the outbound lane, deposit the
/// `MessageAccepted` event and return the assigned nonce together with the number
/// of messages still queued on the lane.
fn send_message(args: SendMessageArgs<T, I>) -> SendMessageArtifacts {
// save message in outbound storage and emit event
let mut lane = outbound_lane::<T, I>(args.lane_id);
let message_len = args.payload.len();
let nonce = lane.send_message(args.payload);
// return number of messages in the queue to let sender know about its state
let enqueued_messages = lane.data().queued_messages().saturating_len();
log::trace!(
target: LOG_TARGET,
"Accepted message {} to lane {:?}. Message size: {:?}",
nonce,
args.lane_id,
message_len,
);
Pallet::<T, I>::deposit_event(Event::MessageAccepted { lane_id: args.lane_id, nonce });
SendMessageArtifacts { nonce, enqueued_messages }
}
}
/// Ensure that the pallet is in normal operational mode.
fn ensure_normal_operating_mode<T: Config<I>, I: 'static>() -> Result<(), Error<T, I>> {
if PalletOperatingMode::<T, I>::get() ==
MessagesOperatingMode::Basic(BasicOperatingMode::Normal)
{
return Ok(())
}
Err(Error::<T, I>::NotOperatingNormally)
}
/// Creates new inbound lane object, backed by runtime storage.
///
/// The returned lane lazily reads/caches its data via `RuntimeInboundLaneStorage`.
fn inbound_lane<T: Config<I>, I: 'static>(
lane_id: LaneId,
) -> InboundLane<RuntimeInboundLaneStorage<T, I>> {
InboundLane::new(RuntimeInboundLaneStorage::from_lane_id(lane_id))
}
/// Creates new outbound lane object, backed by runtime storage.
///
/// Unlike the inbound variant, outbound lane storage is read directly (no cache).
fn outbound_lane<T: Config<I>, I: 'static>(
lane_id: LaneId,
) -> OutboundLane<RuntimeOutboundLaneStorage<T, I>> {
OutboundLane::new(RuntimeOutboundLaneStorage { lane_id, _phantom: Default::default() })
}
/// Runtime inbound lane storage.
struct RuntimeInboundLaneStorage<T: Config<I>, I: 'static = ()> {
// identifier of the backed inbound lane
lane_id: LaneId,
// lane data cache, lazily filled by `get_or_init_data`
cached_data: Option<InboundLaneData<T::InboundRelayer>>,
// marker for the pallet instance
_phantom: PhantomData<I>,
}
impl<T: Config<I>, I: 'static> RuntimeInboundLaneStorage<T, I> {
/// Creates new runtime inbound lane storage.
///
/// The lane data cache starts empty; it is filled on first `get_or_init_data` call.
fn from_lane_id(lane_id: LaneId) -> RuntimeInboundLaneStorage<T, I> {
RuntimeInboundLaneStorage { lane_id, cached_data: None, _phantom: Default::default() }
}
}
impl<T: Config<I>, I: 'static> RuntimeInboundLaneStorage<T, I> {
/// Returns number of bytes that may be subtracted from the PoV component of
/// `receive_messages_proof` call, because the actual inbound lane state is smaller than the
/// maximal configured.
///
/// Maximal inbound lane state set size is configured by the
/// `MaxUnrewardedRelayerEntriesAtInboundLane` constant from the pallet configuration. The PoV
/// of the call includes the maximal size of inbound lane state. If the actual size is smaller,
/// we may subtract extra bytes from this component.
pub fn extra_proof_size_bytes(&mut self) -> u64 {
// worst-case (maximal) encoded size of the stored lane data
let max_encoded_len = StoredInboundLaneData::<T, I>::max_encoded_len();
let relayers_count = self.get_or_init_data().relayers.len();
// estimated encoded size of the actual lane data; falls back to `usize::MAX`
// (=> zero refund after the saturating subtraction) if the hint is unavailable
let actual_encoded_len =
InboundLaneData::<T::InboundRelayer>::encoded_size_hint(relayers_count)
.unwrap_or(usize::MAX);
max_encoded_len.saturating_sub(actual_encoded_len) as _
}
}
impl<T: Config<I>, I: 'static> InboundLaneStorage for RuntimeInboundLaneStorage<T, I> {
	type Relayer = T::InboundRelayer;
	/// Identifier of the backed inbound lane.
	fn id(&self) -> LaneId {
		self.lane_id
	}
	/// Maximal number of unrewarded relayer entries, from the pallet configuration.
	fn max_unrewarded_relayer_entries(&self) -> MessageNonce {
		T::MaxUnrewardedRelayerEntriesAtInboundLane::get()
	}
	/// Maximal number of unconfirmed messages, from the pallet configuration.
	fn max_unconfirmed_messages(&self) -> MessageNonce {
		T::MaxUnconfirmedMessagesAtInboundLane::get()
	}
	/// Read lane data, hitting the `InboundLanes` map only on the first call and
	/// serving every subsequent call from the cache. Returns a clone.
	fn get_or_init_data(&mut self) -> InboundLaneData<T::InboundRelayer> {
		self.cached_data
			.get_or_insert_with(|| InboundLanes::<T, I>::get(self.lane_id).into())
			.clone()
	}
	/// Write lane data to the `InboundLanes` map, keeping the cache in sync.
	fn set_data(&mut self, data: InboundLaneData<T::InboundRelayer>) {
		self.cached_data = Some(data.clone());
		InboundLanes::<T, I>::insert(self.lane_id, StoredInboundLaneData::<T, I>(data))
	}
}
/// Runtime outbound lane storage.
struct RuntimeOutboundLaneStorage<T, I = ()> {
// identifier of the backed outbound lane
lane_id: LaneId,
// marker for the runtime and pallet instance
_phantom: PhantomData<(T, I)>,
}
impl<T: Config<I>, I: 'static> OutboundLaneStorage for RuntimeOutboundLaneStorage<T, I> {
type StoredMessagePayload = StoredMessagePayload<T, I>;
/// Identifier of the backed outbound lane.
fn id(&self) -> LaneId {
self.lane_id
}
/// Read outbound lane state from the `OutboundLanes` map.
fn data(&self) -> OutboundLaneData {
OutboundLanes::<T, I>::get(self.lane_id)
}
/// Write outbound lane state to the `OutboundLanes` map.
fn set_data(&mut self, data: OutboundLaneData) {
OutboundLanes::<T, I>::insert(self.lane_id, data)
}
/// Read stored message payload by nonce (available in tests only).
#[cfg(test)]
fn message(&self, nonce: &MessageNonce) -> Option<Self::StoredMessagePayload> {
OutboundMessages::<T, I>::get(MessageKey { lane_id: self.lane_id, nonce: *nonce })
}
/// Store message payload under the `(lane_id, nonce)` key.
fn save_message(&mut self, nonce: MessageNonce, message_payload: Self::StoredMessagePayload) {
OutboundMessages::<T, I>::insert(
MessageKey { lane_id: self.lane_id, nonce },
message_payload,
);
}
/// Remove message payload stored under the `(lane_id, nonce)` key.
fn remove_message(&mut self, nonce: &MessageNonce) {
OutboundMessages::<T, I>::remove(MessageKey { lane_id: self.lane_id, nonce: *nonce });
}
}
/// Verify messages proof and return proved messages with decoded payload.
fn verify_and_decode_messages_proof<Chain: SourceHeaderChain, DispatchPayload: Decode>(
	proof: Chain::MessagesProof,
	messages_count: u32,
) -> Result<ProvedMessages<DispatchMessage<DispatchPayload>>, VerificationError> {
	// `receive_messages_proof` weight formula and `MaxUnconfirmedMessagesAtInboundLane` check
	// guarantees that the `message_count` is sane and Vec<Message> may be allocated.
	// (tx with too many messages will either be rejected from the pool, or will fail earlier)
	let proved_by_lane = Chain::verify_messages_proof(proof, messages_count)?;
	// convert every proved message into a `DispatchMessage` with decoded payload
	Ok(proved_by_lane
		.into_iter()
		.map(|(lane, lane_data)| {
			let messages = lane_data.messages.into_iter().map(Into::into).collect();
			(lane, ProvedLaneMessages { lane_state: lane_data.lane_state, messages })
		})
		.collect())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
mock::{
inbound_unrewarded_relayers_state, message, message_payload, run_test,
unrewarded_relayer, AccountId, DbWeight, RuntimeEvent as TestEvent, RuntimeOrigin,
TestDeliveryConfirmationPayments, TestDeliveryPayments, TestMessageDispatch,
TestMessagesDeliveryProof, TestMessagesProof, TestOnMessagesDelivered, TestRelayer,
TestRuntime, TestWeightInfo, MAX_OUTBOUND_PAYLOAD_SIZE,
PAYLOAD_REJECTED_BY_TARGET_CHAIN, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_LANE_ID_2,
TEST_LANE_ID_3, TEST_RELAYER_A, TEST_RELAYER_B,
},
outbound_lane::ReceivalConfirmationError,
};
use bp_messages::{
source_chain::MessagesBridge, BridgeMessagesCall, UnrewardedRelayer,
UnrewardedRelayersState,
};
use bp_test_utils::generate_owned_bridge_module_tests;
use frame_support::{
assert_noop, assert_ok,
dispatch::Pays,
storage::generator::{StorageMap, StorageValue},
traits::Hooks,
weights::Weight,
};
use frame_system::{EventRecord, Pallet as System, Phase};
use sp_runtime::DispatchError;
// Test helper: moves the mock chain to block #1 and drops previously recorded
// events, so that tests can assert on exactly the events they produce.
fn get_ready_for_events() {
System::<TestRuntime>::set_block_number(1);
System::<TestRuntime>::reset_events();
}
// Test helper: validates and sends `REGULAR_PAYLOAD` over `lane_id`, then checks
// that the queue length grew by one and the `MessageAccepted` event (with the next
// generated nonce) has been deposited.
fn send_regular_message(lane_id: LaneId) {
get_ready_for_events();
let outbound_lane = outbound_lane::<TestRuntime, ()>(lane_id);
let message_nonce = outbound_lane.data().latest_generated_nonce + 1;
let prev_enqueud_messages = outbound_lane.data().queued_messages().saturating_len();
let valid_message = Pallet::<TestRuntime, ()>::validate_message(lane_id, &REGULAR_PAYLOAD)
.expect("validate_message has failed");
let artifacts = Pallet::<TestRuntime, ()>::send_message(valid_message);
assert_eq!(artifacts.enqueued_messages, prev_enqueud_messages + 1);
// check event with assigned nonce
assert_eq!(
System::<TestRuntime>::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: TestEvent::Messages(Event::MessageAccepted {
lane_id,
nonce: message_nonce
}),
topics: vec![],
}],
);
}
// Test helper: submits a valid delivery proof for message #1 on `TEST_LANE_ID` and
// checks that the `MessagesDelivered` event has been deposited.
fn receive_messages_delivery_proof() {
System::<TestRuntime>::set_block_number(1);
System::<TestRuntime>::reset_events();
assert_ok!(Pallet::<TestRuntime>::receive_messages_delivery_proof(
RuntimeOrigin::signed(1),
TestMessagesDeliveryProof(Ok((
TEST_LANE_ID,
InboundLaneData {
last_confirmed_nonce: 1,
relayers: vec![UnrewardedRelayer {
relayer: 0,
messages: DeliveredMessages::new(1),
}]
.into_iter()
.collect(),
},
))),
UnrewardedRelayersState {
unrewarded_relayer_entries: 1,
messages_in_oldest_entry: 1,
total_messages: 1,
last_delivered_nonce: 1,
},
));
assert_eq!(
System::<TestRuntime>::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: TestEvent::Messages(Event::MessagesDelivered {
lane_id: TEST_LANE_ID,
messages: DeliveredMessages::new(1),
}),
topics: vec![],
}],
);
}
// In the `Halted` mode: sending fails with `NotOperatingNormally`, while both
// `receive_*` calls fail with `BridgeModule(Halted)`.
#[test]
fn pallet_rejects_transactions_if_halted() {
run_test(|| {
// send message first to be able to check that delivery_proof fails later
send_regular_message(TEST_LANE_ID);
PalletOperatingMode::<TestRuntime, ()>::put(MessagesOperatingMode::Basic(
BasicOperatingMode::Halted,
));
assert_noop!(
Pallet::<TestRuntime, ()>::validate_message(TEST_LANE_ID, &REGULAR_PAYLOAD),
Error::<TestRuntime, ()>::NotOperatingNormally,
);
assert_noop!(
Pallet::<TestRuntime>::receive_messages_proof(
RuntimeOrigin::signed(1),
TEST_RELAYER_A,
Ok(vec![message(2, REGULAR_PAYLOAD)]).into(),
1,
REGULAR_PAYLOAD.declared_weight,
),
Error::<TestRuntime, ()>::BridgeModule(bp_runtime::OwnedBridgeModuleError::Halted),
);
assert_noop!(
Pallet::<TestRuntime>::receive_messages_delivery_proof(
RuntimeOrigin::signed(1),
TestMessagesDeliveryProof(Ok((
TEST_LANE_ID,
InboundLaneData {
last_confirmed_nonce: 1,
relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)]
.into_iter()
.collect(),
},
))),
UnrewardedRelayersState {
unrewarded_relayer_entries: 1,
messages_in_oldest_entry: 1,
total_messages: 1,
last_delivered_nonce: 1,
},
),
Error::<TestRuntime, ()>::BridgeModule(bp_runtime::OwnedBridgeModuleError::Halted),
);
});
}
// In the `RejectingOutboundMessages` mode: sending fails, but both inbound calls
// (`receive_messages_proof` and `receive_messages_delivery_proof`) still succeed.
#[test]
fn pallet_rejects_new_messages_in_rejecting_outbound_messages_operating_mode() {
run_test(|| {
// send message first to be able to check that delivery_proof fails later
send_regular_message(TEST_LANE_ID);
PalletOperatingMode::<TestRuntime, ()>::put(
MessagesOperatingMode::RejectingOutboundMessages,
);
assert_noop!(
Pallet::<TestRuntime, ()>::validate_message(TEST_LANE_ID, &REGULAR_PAYLOAD),
Error::<TestRuntime, ()>::NotOperatingNormally,
);
assert_ok!(Pallet::<TestRuntime>::receive_messages_proof(
RuntimeOrigin::signed(1),
TEST_RELAYER_A,
Ok(vec![message(1, REGULAR_PAYLOAD)]).into(),
1,
REGULAR_PAYLOAD.declared_weight,
),);
assert_ok!(Pallet::<TestRuntime>::receive_messages_delivery_proof(
RuntimeOrigin::signed(1),
TestMessagesDeliveryProof(Ok((
TEST_LANE_ID,
InboundLaneData {
last_confirmed_nonce: 1,
relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)]
.into_iter()
.collect(),
},
))),
UnrewardedRelayersState {
unrewarded_relayer_entries: 1,
messages_in_oldest_entry: 1,
total_messages: 1,
last_delivered_nonce: 1,
},
));
});
}
// Happy path of message sending: delegated entirely to the `send_regular_message`
// helper, which also checks the queue length and the `MessageAccepted` event.
#[test]
fn send_message_works() {
run_test(|| {
send_regular_message(TEST_LANE_ID);
});
}
// Payloads over `MAX_OUTBOUND_PAYLOAD_SIZE` are rejected with `MessageTooLarge`;
// a payload of exactly the maximal encoded size is accepted.
#[test]
fn send_message_rejects_too_large_message() {
run_test(|| {
let mut message_payload = message_payload(1, 0);
// the payload isn't simply extra, so it'll definitely overflow
// `MAX_OUTBOUND_PAYLOAD_SIZE` if we add `MAX_OUTBOUND_PAYLOAD_SIZE` bytes to extra
message_payload
.extra
.extend_from_slice(&[0u8; MAX_OUTBOUND_PAYLOAD_SIZE as usize]);
assert_noop!(
Pallet::<TestRuntime, ()>::validate_message(TEST_LANE_ID, &message_payload.clone(),),
Error::<TestRuntime, ()>::MessageRejectedByPallet(
VerificationError::MessageTooLarge
),
);
// let's check that we're able to send `MAX_OUTBOUND_PAYLOAD_SIZE` messages
while message_payload.encoded_size() as u32 > MAX_OUTBOUND_PAYLOAD_SIZE {
message_payload.extra.pop();
}
assert_eq!(message_payload.encoded_size() as u32, MAX_OUTBOUND_PAYLOAD_SIZE);
let valid_message =
Pallet::<TestRuntime, ()>::validate_message(TEST_LANE_ID, &message_payload)
.expect("validate_message has failed");
Pallet::<TestRuntime, ()>::send_message(valid_message);
})
}
// A payload rejected by the mock target-chain verifier surfaces as
// `MessageRejectedByChainVerifier` from `validate_message`.
#[test]
fn chain_verifier_rejects_invalid_message_in_send_message() {
run_test(|| {
// messages with this payload are rejected by target chain verifier
assert_noop!(
Pallet::<TestRuntime, ()>::validate_message(
TEST_LANE_ID,
&PAYLOAD_REJECTED_BY_TARGET_CHAIN,
),
Error::<TestRuntime, ()>::MessageRejectedByChainVerifier(VerificationError::Other(
mock::TEST_ERROR
)),
);
});
}
// Happy path of `receive_messages_proof`: the inbound lane records the delivered
// nonce and the relayer's delivery reward is paid.
#[test]
fn receive_messages_proof_works() {
run_test(|| {
assert_ok!(Pallet::<TestRuntime>::receive_messages_proof(
RuntimeOrigin::signed(1),
TEST_RELAYER_A,
Ok(vec![message(1, REGULAR_PAYLOAD)]).into(),
1,
REGULAR_PAYLOAD.declared_weight,
));
assert_eq!(InboundLanes::<TestRuntime>::get(TEST_LANE_ID).0.last_delivered_nonce(), 1);
assert!(TestDeliveryPayments::is_reward_paid(1));
});
}
// A messages proof that also carries an outbound lane state update must bump
// `last_confirmed_nonce` and prune the now-confirmed unrewarded relayer entry.
#[test]
fn receive_messages_proof_updates_confirmed_message_nonce() {
run_test(|| {
// say we have received 10 messages && last confirmed message is 8
InboundLanes::<TestRuntime, ()>::insert(
TEST_LANE_ID,
InboundLaneData {
last_confirmed_nonce: 8,
relayers: vec![
unrewarded_relayer(9, 9, TEST_RELAYER_A),
unrewarded_relayer(10, 10, TEST_RELAYER_B),
]
.into_iter()
.collect(),
},
);
assert_eq!(
inbound_unrewarded_relayers_state(TEST_LANE_ID),
UnrewardedRelayersState {
unrewarded_relayer_entries: 2,
messages_in_oldest_entry: 1,
total_messages: 2,
last_delivered_nonce: 10,
},
);
// message proof includes outbound lane state with latest confirmed message updated to 9
let mut message_proof: TestMessagesProof =
Ok(vec![message(11, REGULAR_PAYLOAD)]).into();
message_proof.result.as_mut().unwrap()[0].1.lane_state =
Some(OutboundLaneData { latest_received_nonce: 9, ..Default::default() });
assert_ok!(Pallet::<TestRuntime>::receive_messages_proof(
RuntimeOrigin::signed(1),
TEST_RELAYER_A,
message_proof,
1,
REGULAR_PAYLOAD.declared_weight,
));
// relayer A's entry for message 9 is confirmed and dropped; message 11 is appended
assert_eq!(
InboundLanes::<TestRuntime>::get(TEST_LANE_ID).0,
InboundLaneData {
last_confirmed_nonce: 9,
relayers: vec![
unrewarded_relayer(10, 10, TEST_RELAYER_B),
unrewarded_relayer(11, 11, TEST_RELAYER_A)
]
.into_iter()
.collect(),
},
);
assert_eq!(
inbound_unrewarded_relayers_state(TEST_LANE_ID),
UnrewardedRelayersState {
unrewarded_relayer_entries: 2,
messages_in_oldest_entry: 1,
total_messages: 2,
last_delivered_nonce: 11,
},
);
});
}
// `receive_messages_proof` must bail with `MessageDispatchInactive` when the mock
// dispatcher reports itself inactive.
#[test]
fn receive_messages_fails_if_dispatcher_is_inactive() {
run_test(|| {
TestMessageDispatch::deactivate();
assert_noop!(
Pallet::<TestRuntime>::receive_messages_proof(
RuntimeOrigin::signed(1),
TEST_RELAYER_A,
Ok(vec![message(1, REGULAR_PAYLOAD)]).into(),
1,
REGULAR_PAYLOAD.declared_weight,
),
Error::<TestRuntime, ()>::MessageDispatchInactive,
);
});
}
// Declaring one ref-time unit less than the message needs must fail the whole call
// with `InsufficientDispatchWeight` and leave the inbound lane untouched.
#[test]
fn receive_messages_proof_does_not_accept_message_if_dispatch_weight_is_not_enough() {
run_test(|| {
let mut declared_weight = REGULAR_PAYLOAD.declared_weight;
*declared_weight.ref_time_mut() -= 1;
assert_noop!(
Pallet::<TestRuntime>::receive_messages_proof(
RuntimeOrigin::signed(1),
TEST_RELAYER_A,
Ok(vec![message(1, REGULAR_PAYLOAD)]).into(),
1,
declared_weight,
),
Error::<TestRuntime, ()>::InsufficientDispatchWeight
);
assert_eq!(InboundLanes::<TestRuntime>::get(TEST_LANE_ID).last_delivered_nonce(), 0);
});
}
// A proof the mock source chain fails to verify (`Err(())`) is rejected with
// `InvalidMessagesProof`.
#[test]
fn receive_messages_proof_rejects_invalid_proof() {
run_test(|| {
assert_noop!(
Pallet::<TestRuntime, ()>::receive_messages_proof(
RuntimeOrigin::signed(1),
TEST_RELAYER_A,
Err(()).into(),
1,
Weight::zero(),
),
Error::<TestRuntime, ()>::InvalidMessagesProof,
);
});
}
// An absurd declared messages count (`u32::MAX`) is rejected up-front with
// `TooManyMessagesInTheProof`, before any proof verification.
#[test]
fn receive_messages_proof_rejects_proof_with_too_many_messages() {
run_test(|| {
assert_noop!(
Pallet::<TestRuntime, ()>::receive_messages_proof(
RuntimeOrigin::signed(1),
TEST_RELAYER_A,
Ok(vec![message(1, REGULAR_PAYLOAD)]).into(),
u32::MAX,
Weight::zero(),
),
Error::<TestRuntime, ()>::TooManyMessagesInTheProof,
);
});
}
// Happy path of `receive_messages_delivery_proof`: after sending one message and
// confirming its delivery, the outbound lane's `latest_received_nonce` is 1.
#[test]
fn receive_messages_delivery_proof_works() {
run_test(|| {
send_regular_message(TEST_LANE_ID);
receive_messages_delivery_proof();
assert_eq!(
OutboundLanes::<TestRuntime, ()>::get(TEST_LANE_ID).latest_received_nonce,
1,
);
});
}
// Rewards go only to relayers of newly-confirmed messages, and the post-dispatch
// weight is recomputed from the actually-rewarded (not declared) relayers state.
#[test]
fn receive_messages_delivery_proof_rewards_relayers() {
run_test(|| {
send_regular_message(TEST_LANE_ID);
send_regular_message(TEST_LANE_ID);
// this reports delivery of message 1 => reward is paid to TEST_RELAYER_A
let single_message_delivery_proof = TestMessagesDeliveryProof(Ok((
TEST_LANE_ID,
InboundLaneData {
relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into_iter().collect(),
..Default::default()
},
)));
let single_message_delivery_proof_size = single_message_delivery_proof.size();
let result = Pallet::<TestRuntime>::receive_messages_delivery_proof(
RuntimeOrigin::signed(1),
single_message_delivery_proof,
UnrewardedRelayersState {
unrewarded_relayer_entries: 1,
messages_in_oldest_entry: 1,
total_messages: 1,
last_delivered_nonce: 1,
},
);
assert_ok!(result);
assert_eq!(
result.unwrap().actual_weight.unwrap(),
TestWeightInfo::receive_messages_delivery_proof_weight(
&PreComputedSize(single_message_delivery_proof_size as _),
&UnrewardedRelayersState {
unrewarded_relayer_entries: 1,
total_messages: 1,
..Default::default()
},
)
);
assert!(TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_A, 1));
assert!(!TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_B, 1));
assert_eq!(TestOnMessagesDelivered::call_arguments(), Some((TEST_LANE_ID, 1)));
// this reports delivery of both message 1 and message 2 => reward is paid only to
// TEST_RELAYER_B
let two_messages_delivery_proof = TestMessagesDeliveryProof(Ok((
TEST_LANE_ID,
InboundLaneData {
relayers: vec![
unrewarded_relayer(1, 1, TEST_RELAYER_A),
unrewarded_relayer(2, 2, TEST_RELAYER_B),
]
.into_iter()
.collect(),
..Default::default()
},
)));
let two_messages_delivery_proof_size = two_messages_delivery_proof.size();
let result = Pallet::<TestRuntime>::receive_messages_delivery_proof(
RuntimeOrigin::signed(1),
two_messages_delivery_proof,
UnrewardedRelayersState {
unrewarded_relayer_entries: 2,
messages_in_oldest_entry: 1,
total_messages: 2,
last_delivered_nonce: 2,
},
);
assert_ok!(result);
// even though the pre-dispatch weight was for two messages, the actual weight is
// for single message only
assert_eq!(
result.unwrap().actual_weight.unwrap(),
TestWeightInfo::receive_messages_delivery_proof_weight(
&PreComputedSize(two_messages_delivery_proof_size as _),
&UnrewardedRelayersState {
unrewarded_relayer_entries: 1,
total_messages: 1,
..Default::default()
},
)
);
assert!(!TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_A, 1));
assert!(TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_B, 1));
assert_eq!(TestOnMessagesDelivered::call_arguments(), Some((TEST_LANE_ID, 0)));
});
}
// A delivery proof the mock target chain fails to verify (`Err(())`) is rejected
// with `InvalidMessagesDeliveryProof`.
#[test]
fn receive_messages_delivery_proof_rejects_invalid_proof() {
run_test(|| {
assert_noop!(
Pallet::<TestRuntime>::receive_messages_delivery_proof(
RuntimeOrigin::signed(1),
TestMessagesDeliveryProof(Err(())),
Default::default(),
),
Error::<TestRuntime, ()>::InvalidMessagesDeliveryProof,
);
});
}
// Each field of the relayer-declared `UnrewardedRelayersState` (entries count,
// messages count, last delivered nonce) is checked against the proved lane data;
// any mismatch yields `InvalidUnrewardedRelayersState`.
#[test]
fn receive_messages_delivery_proof_rejects_proof_if_declared_relayers_state_is_invalid() {
run_test(|| {
// when number of relayers entries is invalid
assert_noop!(
Pallet::<TestRuntime>::receive_messages_delivery_proof(
RuntimeOrigin::signed(1),
TestMessagesDeliveryProof(Ok((
TEST_LANE_ID,
InboundLaneData {
relayers: vec![
unrewarded_relayer(1, 1, TEST_RELAYER_A),
unrewarded_relayer(2, 2, TEST_RELAYER_B)
]
.into_iter()
.collect(),
..Default::default()
}
))),
UnrewardedRelayersState {
unrewarded_relayer_entries: 1,
total_messages: 2,
last_delivered_nonce: 2,
..Default::default()
},
),
Error::<TestRuntime, ()>::InvalidUnrewardedRelayersState,
);
// when number of messages is invalid
assert_noop!(
Pallet::<TestRuntime>::receive_messages_delivery_proof(
RuntimeOrigin::signed(1),
TestMessagesDeliveryProof(Ok((
TEST_LANE_ID,
InboundLaneData {
relayers: vec![
unrewarded_relayer(1, 1, TEST_RELAYER_A),
unrewarded_relayer(2, 2, TEST_RELAYER_B)
]
.into_iter()
.collect(),
..Default::default()
}
))),
UnrewardedRelayersState {
unrewarded_relayer_entries: 2,
total_messages: 1,
last_delivered_nonce: 2,
..Default::default()
},
),
Error::<TestRuntime, ()>::InvalidUnrewardedRelayersState,
);
// when last delivered nonce is invalid
assert_noop!(
Pallet::<TestRuntime>::receive_messages_delivery_proof(
RuntimeOrigin::signed(1),
TestMessagesDeliveryProof(Ok((
TEST_LANE_ID,
InboundLaneData {
relayers: vec![
unrewarded_relayer(1, 1, TEST_RELAYER_A),
unrewarded_relayer(2, 2, TEST_RELAYER_B)
]
.into_iter()
.collect(),
..Default::default()
}
))),
UnrewardedRelayersState {
unrewarded_relayer_entries: 2,
total_messages: 2,
last_delivered_nonce: 8,
..Default::default()
},
),
Error::<TestRuntime, ()>::InvalidUnrewardedRelayersState,
);
});
}
#[test]
fn receive_messages_accepts_single_message_with_invalid_payload() {
    // A message whose payload does not decode is still accepted by the delivery call —
    // the call succeeds and the message nonce is recorded as delivered.
    run_test(|| {
        let mut invalid_message = message(1, REGULAR_PAYLOAD);
        // empty payload fails to decode as `TestPayload`
        invalid_message.payload = Vec::new();
        assert_ok!(Pallet::<TestRuntime, ()>::receive_messages_proof(
            RuntimeOrigin::signed(1),
            TEST_RELAYER_A,
            Ok(vec![invalid_message]).into(),
            1,
            Weight::zero(), /* weight may be zero in this case (all messages are
                             * improperly encoded) */
        ),);
        assert_eq!(InboundLanes::<TestRuntime>::get(TEST_LANE_ID).last_delivered_nonce(), 1,);
    });
}
#[test]
fn receive_messages_accepts_batch_with_message_with_invalid_payload() {
    // One undecodable message in the middle of a batch does not block delivery of the
    // whole batch: all three nonces end up delivered.
    run_test(|| {
        let mut invalid_message = message(2, REGULAR_PAYLOAD);
        // empty payload fails to decode as `TestPayload`
        invalid_message.payload = Vec::new();
        assert_ok!(Pallet::<TestRuntime, ()>::receive_messages_proof(
            RuntimeOrigin::signed(1),
            TEST_RELAYER_A,
            Ok(
                vec![message(1, REGULAR_PAYLOAD), invalid_message, message(3, REGULAR_PAYLOAD),]
            )
            .into(),
            3,
            // declared weight covers only the two decodable messages
            REGULAR_PAYLOAD.declared_weight + REGULAR_PAYLOAD.declared_weight,
        ),);
        assert_eq!(InboundLanes::<TestRuntime>::get(TEST_LANE_ID).last_delivered_nonce(), 3,);
    });
}
#[test]
fn actual_dispatch_weight_does_not_overflow() {
    // Each message declares `u64::MAX / 2` of dispatch weight, so summing three
    // declared weights would overflow `u64`. The pallet must reject such a proof
    // with `InsufficientDispatchWeight` instead of wrapping around.
    run_test(|| {
        let message1 = message(1, message_payload(0, u64::MAX / 2));
        let message2 = message(2, message_payload(0, u64::MAX / 2));
        let message3 = message(3, message_payload(0, u64::MAX / 2));
        assert_noop!(
            Pallet::<TestRuntime, ()>::receive_messages_proof(
                RuntimeOrigin::signed(1),
                TEST_RELAYER_A,
                // this may cause overflow if source chain storage is invalid
                Ok(vec![message1, message2, message3]).into(),
                3,
                Weight::MAX,
            ),
            Error::<TestRuntime, ()>::InsufficientDispatchWeight
        );
        // nothing is delivered when the proof is rejected
        assert_eq!(InboundLanes::<TestRuntime>::get(TEST_LANE_ID).last_delivered_nonce(), 0);
    });
}
#[test]
fn ref_time_refund_from_receive_messages_proof_works() {
    // Unspent dispatch weight reported by the message dispatcher must be refunded in
    // the post-dispatch weight, but the refund is capped at the declared weight.
    run_test(|| {
        // Delivers one message whose dispatch reports `unspent_weight` ref-time and
        // returns `(pre_dispatch_weight, post_dispatch_weight)` of the call.
        fn submit_with_unspent_weight(
            nonce: MessageNonce,
            unspent_weight: u64,
        ) -> (Weight, Weight) {
            let mut payload = REGULAR_PAYLOAD;
            *payload.dispatch_result.unspent_weight.ref_time_mut() = unspent_weight;
            let proof = Ok(vec![message(nonce, payload)]).into();
            let messages_count = 1;
            let pre_dispatch_weight =
                <TestRuntime as Config>::WeightInfo::receive_messages_proof_weight(
                    &proof,
                    messages_count,
                    REGULAR_PAYLOAD.declared_weight,
                );
            let result = Pallet::<TestRuntime>::receive_messages_proof(
                RuntimeOrigin::signed(1),
                TEST_RELAYER_A,
                proof,
                messages_count,
                REGULAR_PAYLOAD.declared_weight,
            )
            .expect("delivery has failed");
            let post_dispatch_weight =
                result.actual_weight.expect("receive_messages_proof always returns Some");
            // message delivery transactions are never free
            assert_eq!(result.pays_fee, Pays::Yes);
            (pre_dispatch_weight, post_dispatch_weight)
        }
        // when dispatch is returning `unspent_weight < declared_weight`
        let (pre, post) = submit_with_unspent_weight(1, 1);
        assert_eq!(post.ref_time(), pre.ref_time() - 1);
        // when dispatch is returning `unspent_weight = declared_weight`
        let (pre, post) =
            submit_with_unspent_weight(2, REGULAR_PAYLOAD.declared_weight.ref_time());
        assert_eq!(
            post.ref_time(),
            pre.ref_time() - REGULAR_PAYLOAD.declared_weight.ref_time()
        );
        // when dispatch is returning `unspent_weight > declared_weight`
        // (refund is capped at the declared weight)
        let (pre, post) =
            submit_with_unspent_weight(3, REGULAR_PAYLOAD.declared_weight.ref_time() + 1);
        assert_eq!(
            post.ref_time(),
            pre.ref_time() - REGULAR_PAYLOAD.declared_weight.ref_time()
        );
        // when there's no unspent weight
        let (pre, post) = submit_with_unspent_weight(4, 0);
        assert_eq!(post.ref_time(), pre.ref_time());
        // when dispatch is returning `unspent_weight < declared_weight`
        let (pre, post) = submit_with_unspent_weight(5, 1);
        assert_eq!(post.ref_time(), pre.ref_time() - 1);
    });
}
#[test]
fn proof_size_refund_from_receive_messages_proof_works() {
    // The pre-dispatch `proof_size` assumes the inbound lane holds the maximal number
    // of unrewarded relayer entries. When the lane actually holds fewer entries, the
    // difference must be refunded in the post-dispatch weight.
    run_test(|| {
        let max_entries = crate::mock::MaxUnrewardedRelayerEntriesAtInboundLane::get() as usize;
        // if there's maximal number of unrewarded relayer entries at the inbound lane, then
        // `proof_size` is unchanged in post-dispatch weight
        let proof: TestMessagesProof = Ok(vec![message(101, REGULAR_PAYLOAD)]).into();
        let messages_count = 1;
        let pre_dispatch_weight =
            <TestRuntime as Config>::WeightInfo::receive_messages_proof_weight(
                &proof,
                messages_count,
                REGULAR_PAYLOAD.declared_weight,
            );
        InboundLanes::<TestRuntime>::insert(
            TEST_LANE_ID,
            StoredInboundLaneData(InboundLaneData {
                relayers: vec![
                    UnrewardedRelayer {
                        relayer: 42,
                        messages: DeliveredMessages { begin: 0, end: 100 }
                    };
                    max_entries
                ]
                .into_iter()
                .collect(),
                last_confirmed_nonce: 0,
            }),
        );
        let post_dispatch_weight = Pallet::<TestRuntime>::receive_messages_proof(
            RuntimeOrigin::signed(1),
            TEST_RELAYER_A,
            proof.clone(),
            messages_count,
            REGULAR_PAYLOAD.declared_weight,
        )
        .unwrap()
        .actual_weight
        .unwrap();
        assert_eq!(post_dispatch_weight.proof_size(), pre_dispatch_weight.proof_size());
        // if count of unrewarded relayer entries is less than maximal, then some `proof_size`
        // must be refunded
        InboundLanes::<TestRuntime>::insert(
            TEST_LANE_ID,
            StoredInboundLaneData(InboundLaneData {
                relayers: vec![
                    UnrewardedRelayer {
                        relayer: 42,
                        messages: DeliveredMessages { begin: 0, end: 100 }
                    };
                    max_entries - 1
                ]
                .into_iter()
                .collect(),
                last_confirmed_nonce: 0,
            }),
        );
        let post_dispatch_weight = Pallet::<TestRuntime>::receive_messages_proof(
            RuntimeOrigin::signed(1),
            TEST_RELAYER_A,
            proof,
            messages_count,
            REGULAR_PAYLOAD.declared_weight,
        )
        .unwrap()
        .actual_weight
        .unwrap();
        assert!(
            post_dispatch_weight.proof_size() < pre_dispatch_weight.proof_size(),
            "Expected post-dispatch PoV {} to be less than pre-dispatch PoV {}",
            post_dispatch_weight.proof_size(),
            pre_dispatch_weight.proof_size(),
        );
    });
}
#[test]
fn messages_delivered_callbacks_are_called() {
    // Confirms delivery of three sent messages in two separate transactions and checks
    // that both confirmation extrinsics succeed.
    run_test(|| {
        send_regular_message(TEST_LANE_ID);
        send_regular_message(TEST_LANE_ID);
        send_regular_message(TEST_LANE_ID);
        // messages 1+2 are confirmed in 1 tx, message 3 in a separate tx
        // dispatch of message 2 has failed
        let mut delivered_messages_1_and_2 = DeliveredMessages::new(1);
        delivered_messages_1_and_2.note_dispatched_message();
        let messages_1_and_2_proof = Ok((
            TEST_LANE_ID,
            InboundLaneData {
                last_confirmed_nonce: 0,
                relayers: vec![UnrewardedRelayer {
                    relayer: 0,
                    messages: delivered_messages_1_and_2.clone(),
                }]
                .into_iter()
                .collect(),
            },
        ));
        let delivered_message_3 = DeliveredMessages::new(3);
        let messages_3_proof = Ok((
            TEST_LANE_ID,
            InboundLaneData {
                last_confirmed_nonce: 0,
                relayers: vec![UnrewardedRelayer { relayer: 0, messages: delivered_message_3 }]
                    .into_iter()
                    .collect(),
            },
        ));
        // first tx with messages 1+2
        assert_ok!(Pallet::<TestRuntime>::receive_messages_delivery_proof(
            RuntimeOrigin::signed(1),
            TestMessagesDeliveryProof(messages_1_and_2_proof),
            UnrewardedRelayersState {
                unrewarded_relayer_entries: 1,
                messages_in_oldest_entry: 2,
                total_messages: 2,
                last_delivered_nonce: 2,
            },
        ));
        // second tx with message 3
        assert_ok!(Pallet::<TestRuntime>::receive_messages_delivery_proof(
            RuntimeOrigin::signed(1),
            TestMessagesDeliveryProof(messages_3_proof),
            UnrewardedRelayersState {
                unrewarded_relayer_entries: 1,
                messages_in_oldest_entry: 1,
                total_messages: 1,
                last_delivered_nonce: 3,
            },
        ));
    });
}
#[test]
fn receive_messages_delivery_proof_rejects_proof_if_trying_to_confirm_more_messages_than_expected(
) {
    // The proof must not confirm more messages than the submitted
    // `UnrewardedRelayersState` declares.
    run_test(|| {
        // send message first to be able to check that delivery_proof fails later
        send_regular_message(TEST_LANE_ID);
        // 1) InboundLaneData declares that the `last_confirmed_nonce` is 1;
        // 2) InboundLaneData has no entries => `InboundLaneData::last_delivered_nonce()`
        //    returns `last_confirmed_nonce`;
        // 3) it means that we're going to confirm delivery of messages 1..=1;
        // 4) so the number of declared messages (see `UnrewardedRelayersState`) is `0` and
        //    number of actually confirmed messages is `1`.
        assert_noop!(
            Pallet::<TestRuntime>::receive_messages_delivery_proof(
                RuntimeOrigin::signed(1),
                TestMessagesDeliveryProof(Ok((
                    TEST_LANE_ID,
                    InboundLaneData { last_confirmed_nonce: 1, relayers: Default::default() },
                ))),
                UnrewardedRelayersState { last_delivered_nonce: 1, ..Default::default() },
            ),
            Error::<TestRuntime, ()>::ReceivalConfirmation(
                ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected
            ),
        );
    });
}
#[test]
fn storage_keys_computed_properly() {
    // Final storage keys of the pallet's storage items must match the keys computed by
    // the `bp_messages::storage_keys` helpers (which are used by external readers).
    assert_eq!(
        PalletOperatingMode::<TestRuntime>::storage_value_final_key().to_vec(),
        bp_messages::storage_keys::operating_mode_key("Messages").0,
    );
    assert_eq!(
        OutboundMessages::<TestRuntime>::storage_map_final_key(MessageKey {
            lane_id: TEST_LANE_ID,
            nonce: 42
        }),
        bp_messages::storage_keys::message_key("Messages", &TEST_LANE_ID, 42).0,
    );
    assert_eq!(
        OutboundLanes::<TestRuntime>::storage_map_final_key(TEST_LANE_ID),
        bp_messages::storage_keys::outbound_lane_data_key("Messages", &TEST_LANE_ID).0,
    );
    assert_eq!(
        InboundLanes::<TestRuntime>::storage_map_final_key(TEST_LANE_ID),
        bp_messages::storage_keys::inbound_lane_data_key("Messages", &TEST_LANE_ID).0,
    );
}
#[test]
fn inbound_message_details_works() {
    // `inbound_message_data` must report the dispatch weight declared by the payload.
    run_test(|| {
        assert_eq!(
            Pallet::<TestRuntime>::inbound_message_data(
                TEST_LANE_ID,
                REGULAR_PAYLOAD.encode(),
                OutboundMessageDetails { nonce: 0, dispatch_weight: Weight::zero(), size: 0 },
            ),
            InboundMessageDetails { dispatch_weight: REGULAR_PAYLOAD.declared_weight },
        );
    });
}
#[test]
fn on_idle_callback_respects_remaining_weight() {
    // `on_idle` prunes confirmed outbound messages, but only as many as the remaining
    // block weight allows; it must also return the weight it actually consumed.
    run_test(|| {
        send_regular_message(TEST_LANE_ID);
        send_regular_message(TEST_LANE_ID);
        send_regular_message(TEST_LANE_ID);
        send_regular_message(TEST_LANE_ID);
        assert_ok!(Pallet::<TestRuntime>::receive_messages_delivery_proof(
            RuntimeOrigin::signed(1),
            TestMessagesDeliveryProof(Ok((
                TEST_LANE_ID,
                InboundLaneData {
                    last_confirmed_nonce: 4,
                    relayers: vec![unrewarded_relayer(1, 4, TEST_RELAYER_A)]
                        .into_iter()
                        .collect(),
                },
            ))),
            UnrewardedRelayersState {
                unrewarded_relayer_entries: 1,
                messages_in_oldest_entry: 4,
                total_messages: 4,
                last_delivered_nonce: 4,
            },
        ));
        // all 4 messages may be pruned now
        assert_eq!(
            outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().latest_received_nonce,
            4
        );
        assert_eq!(
            outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().oldest_unpruned_nonce,
            1
        );
        System::<TestRuntime>::set_block_number(2);
        // if passed weight is too low to do anything
        let dbw = DbWeight::get();
        assert_eq!(
            Pallet::<TestRuntime, ()>::on_idle(0, dbw.reads_writes(1, 1)),
            Weight::zero(),
        );
        assert_eq!(
            outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().oldest_unpruned_nonce,
            1
        );
        // if passed weight is enough to prune single message
        assert_eq!(
            Pallet::<TestRuntime, ()>::on_idle(0, dbw.reads_writes(1, 2)),
            dbw.reads_writes(1, 2),
        );
        assert_eq!(
            outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().oldest_unpruned_nonce,
            2
        );
        // if passed weight is enough to prune two more messages
        assert_eq!(
            Pallet::<TestRuntime, ()>::on_idle(0, dbw.reads_writes(1, 3)),
            dbw.reads_writes(1, 3),
        );
        assert_eq!(
            outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().oldest_unpruned_nonce,
            4
        );
        // if passed weight is enough to prune many messages
        // (only one message is left, so less weight is actually consumed)
        assert_eq!(
            Pallet::<TestRuntime, ()>::on_idle(0, dbw.reads_writes(100, 100)),
            dbw.reads_writes(1, 2),
        );
        assert_eq!(
            outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().oldest_unpruned_nonce,
            5
        );
    });
}
#[test]
fn on_idle_callback_is_rotating_lanes_to_prune() {
    // Pruning in `on_idle` processes a different active lane on each block, so one
    // busy lane does not starve the others.
    run_test(|| {
        // send + receive confirmation for lane 1
        send_regular_message(TEST_LANE_ID);
        receive_messages_delivery_proof();
        // send + receive confirmation for lane 2
        send_regular_message(TEST_LANE_ID_2);
        assert_ok!(Pallet::<TestRuntime>::receive_messages_delivery_proof(
            RuntimeOrigin::signed(1),
            TestMessagesDeliveryProof(Ok((
                TEST_LANE_ID_2,
                InboundLaneData {
                    last_confirmed_nonce: 1,
                    relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)]
                        .into_iter()
                        .collect(),
                },
            ))),
            UnrewardedRelayersState {
                unrewarded_relayer_entries: 1,
                messages_in_oldest_entry: 1,
                total_messages: 1,
                last_delivered_nonce: 1,
            },
        ));
        // nothing is pruned yet
        assert_eq!(
            outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().latest_received_nonce,
            1
        );
        assert_eq!(
            outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().oldest_unpruned_nonce,
            1
        );
        assert_eq!(
            outbound_lane::<TestRuntime, ()>(TEST_LANE_ID_2).data().latest_received_nonce,
            1
        );
        assert_eq!(
            outbound_lane::<TestRuntime, ()>(TEST_LANE_ID_2).data().oldest_unpruned_nonce,
            1
        );
        // in block#2.on_idle lane messages of lane 1 are pruned
        let dbw = DbWeight::get();
        System::<TestRuntime>::set_block_number(2);
        assert_eq!(
            Pallet::<TestRuntime, ()>::on_idle(0, dbw.reads_writes(100, 100)),
            dbw.reads_writes(1, 2),
        );
        assert_eq!(
            outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().oldest_unpruned_nonce,
            2
        );
        assert_eq!(
            outbound_lane::<TestRuntime, ()>(TEST_LANE_ID_2).data().oldest_unpruned_nonce,
            1
        );
        // in block#3.on_idle lane messages of lane 2 are pruned
        System::<TestRuntime>::set_block_number(3);
        assert_eq!(
            Pallet::<TestRuntime, ()>::on_idle(0, dbw.reads_writes(100, 100)),
            dbw.reads_writes(1, 2),
        );
        assert_eq!(
            outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().oldest_unpruned_nonce,
            2
        );
        assert_eq!(
            outbound_lane::<TestRuntime, ()>(TEST_LANE_ID_2).data().oldest_unpruned_nonce,
            2
        );
    });
}
#[test]
fn outbound_message_from_unconfigured_lane_is_rejected() {
    // `TEST_LANE_ID_3` is not in `ActiveOutboundLanes`, so message validation must fail.
    run_test(|| {
        assert_noop!(
            Pallet::<TestRuntime, ()>::validate_message(TEST_LANE_ID_3, &REGULAR_PAYLOAD,),
            Error::<TestRuntime, ()>::InactiveOutboundLane,
        );
    });
}
#[test]
fn test_bridge_messages_call_is_correctly_defined() {
    // The indirect `BridgeMessagesCall` (used to encode calls for the bridged chain)
    // must SCALE-encode identically to the pallet's own `Call` enum.
    let account_id = 1;
    let message_proof: TestMessagesProof = Ok(vec![message(1, REGULAR_PAYLOAD)]).into();
    let message_delivery_proof = TestMessagesDeliveryProof(Ok((
        TEST_LANE_ID,
        InboundLaneData {
            last_confirmed_nonce: 1,
            relayers: vec![UnrewardedRelayer {
                relayer: 0,
                messages: DeliveredMessages::new(1),
            }]
            .into_iter()
            .collect(),
        },
    )));
    let unrewarded_relayer_state = UnrewardedRelayersState {
        unrewarded_relayer_entries: 1,
        total_messages: 1,
        last_delivered_nonce: 1,
        ..Default::default()
    };
    let direct_receive_messages_proof_call = Call::<TestRuntime>::receive_messages_proof {
        relayer_id_at_bridged_chain: account_id,
        proof: message_proof.clone(),
        messages_count: 1,
        dispatch_weight: REGULAR_PAYLOAD.declared_weight,
    };
    let indirect_receive_messages_proof_call = BridgeMessagesCall::<
        AccountId,
        TestMessagesProof,
        TestMessagesDeliveryProof,
    >::receive_messages_proof {
        relayer_id_at_bridged_chain: account_id,
        proof: message_proof,
        messages_count: 1,
        dispatch_weight: REGULAR_PAYLOAD.declared_weight,
    };
    assert_eq!(
        direct_receive_messages_proof_call.encode(),
        indirect_receive_messages_proof_call.encode()
    );
    let direct_receive_messages_delivery_proof_call =
        Call::<TestRuntime>::receive_messages_delivery_proof {
            proof: message_delivery_proof.clone(),
            relayers_state: unrewarded_relayer_state.clone(),
        };
    let indirect_receive_messages_delivery_proof_call = BridgeMessagesCall::<
        AccountId,
        TestMessagesProof,
        TestMessagesDeliveryProof,
    >::receive_messages_delivery_proof {
        proof: message_delivery_proof,
        relayers_state: unrewarded_relayer_state,
    };
    assert_eq!(
        direct_receive_messages_delivery_proof_call.encode(),
        indirect_receive_messages_delivery_proof_call.encode()
    );
}
// Generates the standard owner/operating-mode tests shared by all owned bridge
// modules, instantiated with the messages pallet operating modes.
generate_owned_bridge_module_tests!(
    MessagesOperatingMode::Basic(BasicOperatingMode::Normal),
    MessagesOperatingMode::Basic(BasicOperatingMode::Halted)
);
#[test]
fn inbound_storage_extra_proof_size_bytes_works() {
    // `extra_proof_size_bytes` is the size of the unrewarded-relayer entries that are
    // "missing" from the lane compared to the maximal possible count; it is used to
    // refund overestimated PoV weight.
    fn relayer_entry() -> UnrewardedRelayer<TestRelayer> {
        UnrewardedRelayer { relayer: 42u64, messages: DeliveredMessages { begin: 0, end: 100 } }
    }
    // Builds a lane storage with the given number of (identical) relayer entries.
    fn storage(relayer_entries: usize) -> RuntimeInboundLaneStorage<TestRuntime, ()> {
        RuntimeInboundLaneStorage {
            lane_id: Default::default(),
            cached_data: Some(InboundLaneData {
                relayers: vec![relayer_entry(); relayer_entries].into_iter().collect(),
                last_confirmed_nonce: 0,
            }),
            _phantom: Default::default(),
        }
    }
    let max_entries = crate::mock::MaxUnrewardedRelayerEntriesAtInboundLane::get() as usize;
    // when we have exactly `MaxUnrewardedRelayerEntriesAtInboundLane` unrewarded relayers
    assert_eq!(storage(max_entries).extra_proof_size_bytes(), 0);
    // when we have less than `MaxUnrewardedRelayerEntriesAtInboundLane` unrewarded relayers
    assert_eq!(
        storage(max_entries - 1).extra_proof_size_bytes(),
        relayer_entry().encode().len() as u64
    );
    assert_eq!(
        storage(max_entries - 2).extra_proof_size_bytes(),
        2 * relayer_entry().encode().len() as u64
    );
    // when we have more than `MaxUnrewardedRelayerEntriesAtInboundLane` unrewarded relayers
    // (shall not happen in practice)
    assert_eq!(storage(max_entries + 1).extra_proof_size_bytes(), 0);
}
#[test]
fn maybe_outbound_lanes_count_returns_correct_value() {
    // The reported outbound lanes count must match the mock's configured active lanes.
    assert_eq!(
        MaybeOutboundLanesCount::<TestRuntime, ()>::get(),
        Some(mock::ActiveOutboundLanes::get().len() as u32)
    );
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
// From construct_runtime macro
#![allow(clippy::from_over_into)]
use crate::{Config, StoredMessagePayload};
use bp_messages::{
calc_relayers_rewards,
source_chain::{DeliveryConfirmationPayments, OnMessagesDelivered, TargetHeaderChain},
target_chain::{
DeliveryPayments, DispatchMessage, DispatchMessageData, MessageDispatch,
ProvedLaneMessages, ProvedMessages, SourceHeaderChain,
},
DeliveredMessages, InboundLaneData, LaneId, Message, MessageKey, MessageNonce,
UnrewardedRelayer, UnrewardedRelayersState, VerificationError,
};
use bp_runtime::{messages::MessageDispatchResult, Size};
use codec::{Decode, Encode};
use frame_support::{
derive_impl, parameter_types,
weights::{constants::RocksDbWeight, Weight},
};
use scale_info::TypeInfo;
use sp_runtime::BuildStorage;
use std::{
collections::{BTreeMap, VecDeque},
ops::RangeInclusive,
};
/// Account identifier used by the test runtime.
pub type AccountId = u64;
/// Balance type used by the test runtime.
pub type Balance = u64;

/// Message payload used in tests: carries an id, a declared dispatch weight, a
/// pre-baked dispatch result and filler bytes to control the encoded size.
#[derive(Decode, Encode, Clone, Debug, PartialEq, Eq, TypeInfo)]
pub struct TestPayload {
    /// Field that may be used to identify messages.
    pub id: u64,
    /// Dispatch weight that is declared by the message sender.
    pub declared_weight: Weight,
    /// Message dispatch result.
    ///
    /// Note: in correct code `dispatch_result.unspent_weight` will always be <= `declared_weight`,
    /// but for test purposes we'll be making it larger than `declared_weight` sometimes.
    pub dispatch_result: MessageDispatchResult<TestDispatchLevelResult>,
    /// Extra bytes that affect payload size.
    pub extra: Vec<u8>,
}
/// Message fee type used in tests.
pub type TestMessageFee = u64;
/// Relayer account id type used in tests.
pub type TestRelayer = u64;
/// Dispatch-level result of test message dispatch (no extra details needed in tests).
pub type TestDispatchLevelResult = ();

type Block = frame_system::mocking::MockBlock<TestRuntime>;

use crate as pallet_bridge_messages;

// Test runtime with the system, balances and bridge-messages pallets.
frame_support::construct_runtime! {
    pub enum TestRuntime
    {
        System: frame_system::{Pallet, Call, Config<T>, Storage, Event<T>},
        Balances: pallet_balances::{Pallet, Call, Event<T>},
        Messages: pallet_bridge_messages::{Pallet, Call, Event<T>},
    }
}
/// Database weights used by the test runtime (RocksDB constants).
pub type DbWeight = RocksDbWeight;

#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for TestRuntime {
    type Block = Block;
    type AccountData = pallet_balances::AccountData<Balance>;
    // non-zero db weights are needed by the `on_idle` pruning tests
    type DbWeight = DbWeight;
}

#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)]
impl pallet_balances::Config for TestRuntime {
    type ReserveIdentifier = [u8; 8];
    type AccountStore = System;
}
parameter_types! {
    // maximal number of messages that `on_idle` may prune in a single call
    pub const MaxMessagesToPruneAtOnce: u64 = 10;
    pub const MaxUnrewardedRelayerEntriesAtInboundLane: u64 = 16;
    pub const MaxUnconfirmedMessagesAtInboundLane: u64 = 128;
    pub const TestBridgedChainId: bp_runtime::ChainId = *b"test";
    // lanes that accept outbound messages; `TEST_LANE_ID_3` is deliberately excluded
    pub const ActiveOutboundLanes: &'static [LaneId] = &[TEST_LANE_ID, TEST_LANE_ID_2];
}

/// Weights of messages pallet calls we use in tests.
pub type TestWeightInfo = ();
// Messages pallet configuration for the test runtime, wired to the test
// implementations of all bridge-facing traits below.
impl Config for TestRuntime {
    type RuntimeEvent = RuntimeEvent;
    type WeightInfo = TestWeightInfo;
    type ActiveOutboundLanes = ActiveOutboundLanes;
    type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane;
    type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane;
    type MaximalOutboundPayloadSize = frame_support::traits::ConstU32<MAX_OUTBOUND_PAYLOAD_SIZE>;
    type OutboundPayload = TestPayload;
    type InboundPayload = TestPayload;
    type InboundRelayer = TestRelayer;
    type DeliveryPayments = TestDeliveryPayments;
    type TargetHeaderChain = TestTargetHeaderChain;
    type DeliveryConfirmationPayments = TestDeliveryConfirmationPayments;
    type OnMessagesDelivered = TestOnMessagesDelivered;
    type SourceHeaderChain = TestSourceHeaderChain;
    type MessageDispatch = TestMessageDispatch;
    type BridgedChainId = TestBridgedChainId;
}
// Benchmarking support: constructs valid-looking proofs for the benchmark harness.
#[cfg(feature = "runtime-benchmarks")]
impl crate::benchmarking::Config<()> for TestRuntime {
    fn bench_lane_id() -> LaneId {
        TEST_LANE_ID
    }
    fn prepare_message_proof(
        params: crate::benchmarking::MessageProofParams,
    ) -> (TestMessagesProof, Weight) {
        // in mock run we only care about benchmarks correctness, not the benchmark results
        // => ignore size related arguments
        let (messages, total_dispatch_weight) =
            params.message_nonces.into_iter().map(|n| message(n, REGULAR_PAYLOAD)).fold(
                (Vec::new(), Weight::zero()),
                |(mut messages, total_dispatch_weight), message| {
                    let weight = REGULAR_PAYLOAD.declared_weight;
                    messages.push(message);
                    (messages, total_dispatch_weight.saturating_add(weight))
                },
            );
        let mut proof: TestMessagesProof = Ok(messages).into();
        // attach the requested outbound lane state to the (single) proved lane
        proof.result.as_mut().unwrap().get_mut(0).unwrap().1.lane_state = params.outbound_lane_data;
        (proof, total_dispatch_weight)
    }
    fn prepare_message_delivery_proof(
        params: crate::benchmarking::MessageDeliveryProofParams<AccountId>,
    ) -> TestMessagesDeliveryProof {
        // in mock run we only care about benchmarks correctness, not the benchmark results
        // => ignore size related arguments
        TestMessagesDeliveryProof(Ok((params.lane, params.inbound_lane_data)))
    }
    fn is_relayer_rewarded(_relayer: &AccountId) -> bool {
        true
    }
}
impl Size for TestPayload {
    fn size(&self) -> u32 {
        // 16 is a test-only estimate for the fixed-size fields; only the `extra`
        // bytes are counted exactly, so tests can tune the payload size via `extra`.
        // NOTE(review): 16 is not claimed to be the exact SCALE-encoded length.
        16 + self.extra.len() as u32
    }
}
/// Maximal outbound payload size.
pub const MAX_OUTBOUND_PAYLOAD_SIZE: u32 = 4096;
/// Account that has balance to use in tests.
pub const ENDOWED_ACCOUNT: AccountId = 0xDEAD;
/// Account id of test relayer.
pub const TEST_RELAYER_A: AccountId = 100;
/// Account id of additional test relayer - B.
pub const TEST_RELAYER_B: AccountId = 101;
/// Account id of additional test relayer - C.
pub const TEST_RELAYER_C: AccountId = 102;
/// Error that is returned by all test implementations.
pub const TEST_ERROR: &str = "Test error";
/// Lane that we're using in tests.
pub const TEST_LANE_ID: LaneId = LaneId([0, 0, 0, 1]);
/// Secondary lane that we're using in tests.
pub const TEST_LANE_ID_2: LaneId = LaneId([0, 0, 0, 2]);
/// Inactive outbound lane (not in `ActiveOutboundLanes`).
pub const TEST_LANE_ID_3: LaneId = LaneId([0, 0, 0, 3]);
/// Regular message payload.
pub const REGULAR_PAYLOAD: TestPayload = message_payload(0, 50);
/// Payload that is rejected by `TestTargetHeaderChain`.
pub const PAYLOAD_REJECTED_BY_TARGET_CHAIN: TestPayload = message_payload(1, 50);
/// Vec of proved messages, grouped by lane.
pub type MessagesByLaneVec = Vec<(LaneId, ProvedLaneMessages<Message>)>;
/// Test messages proof.
///
/// `Err(())` models an unverifiable proof; `Ok` carries the proved messages per lane.
#[derive(Debug, Encode, Decode, Clone, PartialEq, Eq, TypeInfo)]
pub struct TestMessagesProof {
    pub result: Result<MessagesByLaneVec, ()>,
}

impl Size for TestMessagesProof {
    fn size(&self) -> u32 {
        // proof size is irrelevant for these tests
        0
    }
}
impl From<Result<Vec<Message>, ()>> for TestMessagesProof {
    /// Builds a test proof from a flat message list, grouping the messages by lane
    /// (per-lane order is preserved; lanes come out sorted by the `BTreeMap`).
    fn from(result: Result<Vec<Message>, ()>) -> Self {
        let result = result.map(|messages| {
            let mut grouped: BTreeMap<LaneId, ProvedLaneMessages<Message>> = BTreeMap::new();
            messages.into_iter().for_each(|message| {
                grouped.entry(message.key.lane_id).or_default().messages.push(message)
            });
            grouped.into_iter().collect()
        });
        Self { result }
    }
}
/// Messages delivery proof used in tests.
///
/// `Err(())` models an unverifiable proof; `Ok` carries the proved inbound lane state.
#[derive(Debug, Encode, Decode, Eq, Clone, PartialEq, TypeInfo)]
pub struct TestMessagesDeliveryProof(pub Result<(LaneId, InboundLaneData<TestRelayer>), ()>);

impl Size for TestMessagesDeliveryProof {
    fn size(&self) -> u32 {
        // proof size is irrelevant for these tests
        0
    }
}
/// Target header chain that is used in tests.
#[derive(Debug, Default)]
pub struct TestTargetHeaderChain;

impl TargetHeaderChain<TestPayload, TestRelayer> for TestTargetHeaderChain {
    type MessagesDeliveryProof = TestMessagesDeliveryProof;

    /// Rejects exactly the `PAYLOAD_REJECTED_BY_TARGET_CHAIN` payload; accepts the rest.
    fn verify_message(payload: &TestPayload) -> Result<(), VerificationError> {
        if *payload == PAYLOAD_REJECTED_BY_TARGET_CHAIN {
            return Err(VerificationError::Other(TEST_ERROR))
        }
        Ok(())
    }

    /// Unwraps the test proof, turning the `Err(())` variant into a verification error.
    fn verify_messages_delivery_proof(
        proof: Self::MessagesDeliveryProof,
    ) -> Result<(LaneId, InboundLaneData<TestRelayer>), VerificationError> {
        match proof.0 {
            Ok(lane_and_data) => Ok(lane_and_data),
            Err(()) => Err(VerificationError::Other(TEST_ERROR)),
        }
    }
}
/// Reward payments at the target chain during delivery transaction.
#[derive(Debug, Default)]
pub struct TestDeliveryPayments;

impl TestDeliveryPayments {
    /// Returns true if given relayer has been rewarded. The reward-paid flag is
    /// cleared after the call.
    pub fn is_reward_paid(relayer: AccountId) -> bool {
        let key = (b":delivery-relayer-reward:", relayer).encode();
        // `take` removes the flag, so a second call returns `false`
        frame_support::storage::unhashed::take::<bool>(&key).is_some()
    }
}

impl DeliveryPayments<AccountId> for TestDeliveryPayments {
    type Error = &'static str;

    // Records the fact of payment under a per-relayer storage key; amounts are ignored.
    fn pay_reward(
        relayer: AccountId,
        _total_messages: MessageNonce,
        _valid_messages: MessageNonce,
        _actual_weight: Weight,
    ) {
        let key = (b":delivery-relayer-reward:", relayer).encode();
        frame_support::storage::unhashed::put(&key, &true);
    }
}
/// Reward payments at the source chain during delivery confirmation transaction.
#[derive(Debug, Default)]
pub struct TestDeliveryConfirmationPayments;

impl TestDeliveryConfirmationPayments {
    /// Returns true if given relayer has been rewarded with given message-count reward.
    /// The reward-paid flag is cleared after the call.
    pub fn is_reward_paid(relayer: AccountId, fee: TestMessageFee) -> bool {
        let key = (b":relayer-reward:", relayer, fee).encode();
        // `take` removes the flag, so a second call returns `false`
        frame_support::storage::unhashed::take::<bool>(&key).is_some()
    }
}

impl DeliveryConfirmationPayments<AccountId> for TestDeliveryConfirmationPayments {
    type Error = &'static str;

    // Records a payment flag per (relayer, reward) pair and returns the number of
    // rewarded relayers.
    fn pay_reward(
        _lane_id: LaneId,
        messages_relayers: VecDeque<UnrewardedRelayer<AccountId>>,
        _confirmation_relayer: &AccountId,
        received_range: &RangeInclusive<MessageNonce>,
    ) -> MessageNonce {
        let relayers_rewards = calc_relayers_rewards(messages_relayers, received_range);
        let rewarded_relayers = relayers_rewards.len();
        for (relayer, reward) in &relayers_rewards {
            let key = (b":relayer-reward:", relayer, reward).encode();
            frame_support::storage::unhashed::put(&key, &true);
        }
        rewarded_relayers as _
    }
}
/// Source header chain that is used in tests.
#[derive(Debug)]
pub struct TestSourceHeaderChain;

impl SourceHeaderChain for TestSourceHeaderChain {
    type MessagesProof = TestMessagesProof;

    /// Converts the test proof into proved messages; the `Err(())` variant becomes
    /// a verification error.
    fn verify_messages_proof(
        proof: Self::MessagesProof,
        _messages_count: u32,
    ) -> Result<ProvedMessages<Message>, VerificationError> {
        match proof.result {
            Ok(messages_by_lane) => Ok(messages_by_lane.into_iter().collect()),
            Err(()) => Err(VerificationError::Other(TEST_ERROR)),
        }
    }
}
/// Test message dispatcher.
#[derive(Debug)]
pub struct TestMessageDispatch;

impl TestMessageDispatch {
    /// Marks the dispatcher inactive by setting the congestion flag in storage
    /// (checked by `is_active` below).
    pub fn deactivate() {
        frame_support::storage::unhashed::put(b"TestMessageDispatch.IsCongested", &true)
    }
}

impl MessageDispatch for TestMessageDispatch {
    type DispatchPayload = TestPayload;
    type DispatchLevelResult = TestDispatchLevelResult;

    // Active unless `deactivate` has set the congestion flag.
    fn is_active() -> bool {
        !frame_support::storage::unhashed::get_or_default::<bool>(
            b"TestMessageDispatch.IsCongested",
        )
    }

    // Weight comes straight from the payload; undecodable payloads weigh zero.
    fn dispatch_weight(message: &mut DispatchMessage<TestPayload>) -> Weight {
        match message.data.payload.as_ref() {
            Ok(payload) => payload.declared_weight,
            Err(_) => Weight::zero(),
        }
    }

    // Result comes straight from the payload; undecodable payloads yield a result
    // with zero unspent weight.
    fn dispatch(
        message: DispatchMessage<TestPayload>,
    ) -> MessageDispatchResult<TestDispatchLevelResult> {
        match message.data.payload.as_ref() {
            Ok(payload) => payload.dispatch_result.clone(),
            Err(_) => dispatch_result(0),
        }
    }
}
/// Test callback, called during message delivery confirmation transaction.
pub struct TestOnMessagesDelivered;

impl TestOnMessagesDelivered {
    /// Returns the arguments of the last `on_messages_delivered` call, if any.
    pub fn call_arguments() -> Option<(LaneId, MessageNonce)> {
        frame_support::storage::unhashed::get(b"TestOnMessagesDelivered.OnMessagesDelivered")
    }
}

impl OnMessagesDelivered for TestOnMessagesDelivered {
    // Remembers the last call arguments in storage so tests can inspect them.
    fn on_messages_delivered(lane: LaneId, enqueued_messages: MessageNonce) {
        frame_support::storage::unhashed::put(
            b"TestOnMessagesDelivered.OnMessagesDelivered",
            &(lane, enqueued_messages),
        );
    }
}
/// Return test lane message with given nonce and payload.
///
/// The message is always placed on `TEST_LANE_ID`; the payload is SCALE-encoded.
pub fn message(nonce: MessageNonce, payload: TestPayload) -> Message {
    Message { key: MessageKey { lane_id: TEST_LANE_ID, nonce }, payload: payload.encode() }
}

/// Return valid outbound message data, constructed from given payload.
///
/// Panics if the encoded payload exceeds `MaximalOutboundPayloadSize`.
pub fn outbound_message_data(payload: TestPayload) -> StoredMessagePayload<TestRuntime, ()> {
    StoredMessagePayload::<TestRuntime, ()>::try_from(payload.encode()).expect("payload too large")
}

/// Return valid inbound (dispatch) message data, constructed from given payload.
pub fn inbound_message_data(payload: TestPayload) -> DispatchMessageData<TestPayload> {
    DispatchMessageData { payload: Ok(payload) }
}
/// Constructs message payload using given arguments and zero unspent weight.
pub const fn message_payload(id: u64, declared_weight: u64) -> TestPayload {
    TestPayload {
        id,
        // only the ref-time component is used; proof-size is zero
        declared_weight: Weight::from_parts(declared_weight, 0),
        dispatch_result: dispatch_result(0),
        extra: Vec::new(),
    }
}

/// Returns message dispatch result with given unspent weight.
pub const fn dispatch_result(
    unspent_weight: u64,
) -> MessageDispatchResult<TestDispatchLevelResult> {
    MessageDispatchResult {
        // only the ref-time component is used; proof-size is zero
        unspent_weight: Weight::from_parts(unspent_weight, 0),
        dispatch_level_result: (),
    }
}
/// Constructs unrewarded relayer entry from nonces range and relayer id.
pub fn unrewarded_relayer(
    begin: MessageNonce,
    end: MessageNonce,
    relayer: TestRelayer,
) -> UnrewardedRelayer<TestRelayer> {
    UnrewardedRelayer { relayer, messages: DeliveredMessages { begin, end } }
}

/// Returns unrewarded relayers state at given lane.
pub fn inbound_unrewarded_relayers_state(lane: bp_messages::LaneId) -> UnrewardedRelayersState {
    let inbound_lane_data = crate::InboundLanes::<TestRuntime, ()>::get(lane).0;
    UnrewardedRelayersState::from(&inbound_lane_data)
}

/// Return test externalities to use in tests.
///
/// Genesis endows `ENDOWED_ACCOUNT` with 1_000_000 units.
pub fn new_test_ext() -> sp_io::TestExternalities {
    let mut t = frame_system::GenesisConfig::<TestRuntime>::default().build_storage().unwrap();
    pallet_balances::GenesisConfig::<TestRuntime> { balances: vec![(ENDOWED_ACCOUNT, 1_000_000)] }
        .assimilate_storage(&mut t)
        .unwrap();
    sp_io::TestExternalities::new(t)
}

/// Run pallet test.
pub fn run_test<T>(test: impl FnOnce() -> T) -> T {
    new_test_ext().execute_with(test)
}