// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Logic for checking if GRANDPA Finality Proofs are valid and optimal.
use crate::justification::{
verification::{
Error, IterationFlow, JustificationVerificationContext, JustificationVerifier,
PrecommitError, SignedPrecommit,
},
GrandpaJustification,
};
use sp_consensus_grandpa::AuthorityId;
use sp_runtime::traits::Header as HeaderT;
use sp_std::{collections::btree_set::BTreeSet, vec::Vec};
/// Verification callbacks that reject all unknown, duplicate or redundant votes.
struct StrictJustificationVerifier {
votes: BTreeSet<AuthorityId>,
}
impl<Header: HeaderT> JustificationVerifier<Header> for StrictJustificationVerifier {
fn process_duplicate_votes_ancestries(
&mut self,
_duplicate_votes_ancestries: Vec<usize>,
) -> Result<(), Error> {
Err(Error::DuplicateVotesAncestries)
}
fn process_redundant_vote(
&mut self,
_precommit_idx: usize,
) -> Result<IterationFlow, PrecommitError> {
Err(PrecommitError::RedundantAuthorityVote)
}
fn process_known_authority_vote(
&mut self,
_precommit_idx: usize,
signed: &SignedPrecommit<Header>,
) -> Result<IterationFlow, PrecommitError> {
if self.votes.contains(&signed.id) {
// There's a lot of code in the `validate_commit` and `import_precommit` functions
// inside the `finality-grandpa` crate (mostly related to reporting equivocations).
// But the only thing that we care about is that only the first vote from the
// authority is accepted
return Err(PrecommitError::DuplicateAuthorityVote)
}
Ok(IterationFlow::Run)
}
fn process_unknown_authority_vote(
&mut self,
_precommit_idx: usize,
) -> Result<(), PrecommitError> {
Err(PrecommitError::UnknownAuthorityVote)
}
fn process_unrelated_ancestry_vote(
&mut self,
_precommit_idx: usize,
) -> Result<IterationFlow, PrecommitError> {
Err(PrecommitError::UnrelatedAncestryVote)
}
fn process_invalid_signature_vote(
&mut self,
_precommit_idx: usize,
) -> Result<(), PrecommitError> {
Err(PrecommitError::InvalidAuthoritySignature)
}
fn process_valid_vote(&mut self, signed: &SignedPrecommit<Header>) {
self.votes.insert(signed.id.clone());
}
fn process_redundant_votes_ancestries(
&mut self,
_redundant_votes_ancestries: BTreeSet<Header::Hash>,
) -> Result<(), Error> {
Err(Error::RedundantVotesAncestries)
}
}
/// Verify that the justification, generated by the given authority set, finalizes the given header.
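///
/// A minimal usage sketch (the `target_header`, `context` and `justification` values, and the
/// `MyHeader` type, are assumed inputs from the caller; they are not provided by this crate):
///
/// ```ignore
/// let result = verify_justification::<MyHeader>(
///     (target_header.hash(), *target_header.number()),
///     &context,
///     &justification,
/// );
/// assert!(result.is_ok());
/// ```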
pub fn verify_justification<Header: HeaderT>(
finalized_target: (Header::Hash, Header::Number),
context: &JustificationVerificationContext,
justification: &GrandpaJustification<Header>,
) -> Result<(), Error> {
let mut verifier = StrictJustificationVerifier { votes: BTreeSet::new() };
verifier.verify_justification(finalized_target, context, justification)
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Defines traits which represent a common interface for Substrate pallets which want to
//! incorporate bridge functionality.
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
use crate::justification::{
GrandpaJustification, JustificationVerificationContext, JustificationVerificationError,
};
use bp_runtime::{
BasicOperatingMode, Chain, HashOf, HasherOf, HeaderOf, RawStorageProof, StorageProofChecker,
StorageProofError, UnderlyingChainProvider,
};
use codec::{Codec, Decode, Encode, EncodeLike, MaxEncodedLen};
use core::{clone::Clone, cmp::Eq, default::Default, fmt::Debug};
use frame_support::PalletError;
use scale_info::TypeInfo;
use serde::{Deserialize, Serialize};
use sp_consensus_grandpa::{AuthorityList, ConsensusLog, SetId, GRANDPA_ENGINE_ID};
use sp_runtime::{traits::Header as HeaderT, Digest, RuntimeDebug};
use sp_std::{boxed::Box, vec::Vec};
pub mod justification;
pub mod storage_keys;
/// Header chain error.
#[derive(Clone, Decode, Encode, Eq, PartialEq, PalletError, Debug, TypeInfo)]
pub enum HeaderChainError {
/// Header with given hash is missing from the chain.
UnknownHeader,
/// Storage proof related error.
StorageProof(StorageProofError),
}
/// Header data that we're storing on-chain.
///
/// Even though we may store the full header, our applications (e.g. XCM) only use a couple of
/// header fields. Extracting those values makes the on-chain storage and PoV smaller, which is good.
#[derive(Clone, Decode, Encode, Eq, MaxEncodedLen, PartialEq, RuntimeDebug, TypeInfo)]
pub struct StoredHeaderData<Number, Hash> {
/// Header number.
pub number: Number,
/// Header state root.
pub state_root: Hash,
}
/// Stored header data builder.
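///
/// A minimal hedged sketch of using the blanket implementation below (any `header` implementing
/// `sp_runtime`'s `Header` trait will do):
///
/// ```ignore
/// let stored: StoredHeaderData<_, _> = header.build();
/// assert_eq!(stored.state_root, *header.state_root());
/// ```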
pub trait StoredHeaderDataBuilder<Number, Hash> {
/// Build header data from self.
fn build(&self) -> StoredHeaderData<Number, Hash>;
}
impl<H: HeaderT> StoredHeaderDataBuilder<H::Number, H::Hash> for H {
fn build(&self) -> StoredHeaderData<H::Number, H::Hash> {
StoredHeaderData { number: *self.number(), state_root: *self.state_root() }
}
}
/// Substrate header chain, abstracted from the way it is stored.
pub trait HeaderChain<C: Chain> {
/// Returns state (storage) root of given finalized header.
fn finalized_header_state_root(header_hash: HashOf<C>) -> Option<HashOf<C>>;
/// Get storage proof checker using finalized header.
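///
/// A hedged usage sketch, assuming `MyHeaderChain` implements this trait and the `header_hash`
/// and `storage_proof` values were obtained from the bridged chain:
///
/// ```ignore
/// let checker = MyHeaderChain::storage_proof_checker(header_hash, storage_proof)?;
/// // `checker` can now be used to read values from the finalized state
/// ```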
fn storage_proof_checker(
header_hash: HashOf<C>,
storage_proof: RawStorageProof,
) -> Result<StorageProofChecker<HasherOf<C>>, HeaderChainError> {
let state_root = Self::finalized_header_state_root(header_hash)
.ok_or(HeaderChainError::UnknownHeader)?;
StorageProofChecker::new(state_root, storage_proof).map_err(HeaderChainError::StorageProof)
}
}
/// A type that can be used as a parameter in a dispatchable function.
///
/// When using `decl_module` all arguments for call functions must implement this trait.
pub trait Parameter: Codec + EncodeLike + Clone + Eq + Debug + TypeInfo {}
impl<T> Parameter for T where T: Codec + EncodeLike + Clone + Eq + Debug + TypeInfo {}
/// A GRANDPA Authority List and ID.
#[derive(Default, Encode, Eq, Decode, RuntimeDebug, PartialEq, Clone, TypeInfo)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct AuthoritySet {
/// List of GRANDPA authorities for the current round.
pub authorities: AuthorityList,
/// Monotonic identifier of the current GRANDPA authority set.
pub set_id: SetId,
}
impl AuthoritySet {
/// Create a new GRANDPA Authority Set.
pub fn new(authorities: AuthorityList, set_id: SetId) -> Self {
Self { authorities, set_id }
}
}
/// Data required for initializing the GRANDPA bridge pallet.
///
/// The bridge needs to know where to start its sync from, and this provides that initial context.
#[derive(
Default, Encode, Decode, RuntimeDebug, PartialEq, Eq, Clone, TypeInfo, Serialize, Deserialize,
)]
pub struct InitializationData<H: HeaderT> {
/// The header from which we should start syncing.
pub header: Box<H>,
/// The initial authorities of the pallet.
pub authority_list: AuthorityList,
/// The ID of the initial authority set.
pub set_id: SetId,
/// Pallet operating mode.
pub operating_mode: BasicOperatingMode,
}
/// Abstract finality proof that justifies block finality.
pub trait FinalityProof<Hash, Number>: Clone + Send + Sync + Debug {
/// Return hash of header that this proof is generated for.
fn target_header_hash(&self) -> Hash;
/// Return number of header that this proof is generated for.
fn target_header_number(&self) -> Number;
}
/// A trait that provides helper methods for querying the consensus log.
pub trait ConsensusLogReader {
/// Returns true if the digest contains an item that schedules an authority set change.
fn schedules_authorities_change(digest: &Digest) -> bool;
}
/// A struct that provides helper methods for querying the GRANDPA consensus log.
pub struct GrandpaConsensusLogReader<Number>(sp_std::marker::PhantomData<Number>);
impl<Number: Codec> GrandpaConsensusLogReader<Number> {
/// Find and return scheduled (regular) change digest item.
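///
/// A hedged usage sketch (assuming `header` is a header of the bridged chain with `u32` block
/// numbers):
///
/// ```ignore
/// if let Some(change) =
///     GrandpaConsensusLogReader::<u32>::find_scheduled_change(header.digest())
/// {
///     // `change.next_authorities` becomes active after `change.delay` blocks
/// }
/// ```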
pub fn find_scheduled_change(
digest: &Digest,
) -> Option<sp_consensus_grandpa::ScheduledChange<Number>> {
// find the first consensus digest with the right ID which converts to
// the right kind of consensus log.
digest
.convert_first(|log| log.consensus_try_to(&GRANDPA_ENGINE_ID))
.and_then(|log| match log {
ConsensusLog::ScheduledChange(change) => Some(change),
_ => None,
})
}
/// Find and return the forced change digest item. Our light client can't do anything
/// with forced changes, so we can't accept a header with a forced change digest.
pub fn find_forced_change(
digest: &Digest,
) -> Option<(Number, sp_consensus_grandpa::ScheduledChange<Number>)> {
// find the first consensus digest with the right ID which converts to
// the right kind of consensus log.
digest
.convert_first(|log| log.consensus_try_to(&GRANDPA_ENGINE_ID))
.and_then(|log| match log {
ConsensusLog::ForcedChange(delay, change) => Some((delay, change)),
_ => None,
})
}
}
impl<Number: Codec> ConsensusLogReader for GrandpaConsensusLogReader<Number> {
fn schedules_authorities_change(digest: &Digest) -> bool {
GrandpaConsensusLogReader::<Number>::find_scheduled_change(digest).is_some()
}
}
/// The finality-related info associated to a header.
#[derive(Encode, Decode, Debug, PartialEq, Clone, TypeInfo)]
pub struct HeaderFinalityInfo<FinalityProof, FinalityVerificationContext> {
/// The header finality proof.
pub finality_proof: FinalityProof,
/// The new verification context introduced by the header.
pub new_verification_context: Option<FinalityVerificationContext>,
}
/// Grandpa-related info associated to a header. This info can be saved to events.
pub type StoredHeaderGrandpaInfo<Header> =
HeaderFinalityInfo<GrandpaJustification<Header>, AuthoritySet>;
/// Processed Grandpa-related info associated to a header.
pub type HeaderGrandpaInfo<Header> =
HeaderFinalityInfo<GrandpaJustification<Header>, JustificationVerificationContext>;
impl<Header: HeaderT> TryFrom<StoredHeaderGrandpaInfo<Header>> for HeaderGrandpaInfo<Header> {
type Error = JustificationVerificationError;
fn try_from(grandpa_info: StoredHeaderGrandpaInfo<Header>) -> Result<Self, Self::Error> {
Ok(Self {
finality_proof: grandpa_info.finality_proof,
new_verification_context: match grandpa_info.new_verification_context {
Some(authority_set) => Some(authority_set.try_into()?),
None => None,
},
})
}
}
/// Helper trait for finding equivocations in finality proofs.
pub trait FindEquivocations<FinalityProof, FinalityVerificationContext, EquivocationProof> {
/// The type returned when encountering an error while looking for equivocations.
type Error: Debug;
/// Find equivocations.
fn find_equivocations(
verification_context: &FinalityVerificationContext,
synced_proof: &FinalityProof,
source_proofs: &[FinalityProof],
) -> Result<Vec<EquivocationProof>, Self::Error>;
}
/// A minimized version of `pallet-bridge-grandpa::Call` that can be used without a runtime.
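///
/// A hedged sketch of how such a call may be built and SCALE-encoded on the relayer side
/// (assuming `header` and `justification` values are already available for some `Header` type):
///
/// ```ignore
/// let call = BridgeGrandpaCall::<Header>::submit_finality_proof {
///     finality_target: Box::new(header),
///     justification,
/// };
/// let encoded = call.encode();
/// ```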
#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
#[allow(non_camel_case_types)]
pub enum BridgeGrandpaCall<Header: HeaderT> {
/// `pallet-bridge-grandpa::Call::submit_finality_proof`
#[codec(index = 0)]
submit_finality_proof {
/// The header that we are going to finalize.
finality_target: Box<Header>,
/// Finality justification for the `finality_target`.
justification: justification::GrandpaJustification<Header>,
},
/// `pallet-bridge-grandpa::Call::initialize`
#[codec(index = 1)]
initialize {
/// All data, required to initialize the pallet.
init_data: InitializationData<Header>,
},
/// `pallet-bridge-grandpa::Call::submit_finality_proof_ex`
#[codec(index = 4)]
submit_finality_proof_ex {
/// The header that we are going to finalize.
finality_target: Box<Header>,
/// Finality justification for the `finality_target`.
justification: justification::GrandpaJustification<Header>,
/// An identifier of the validator set that has signed the justification.
current_set_id: SetId,
},
}
/// The `BridgeGrandpaCall` used by a chain.
pub type BridgeGrandpaCallOf<C> = BridgeGrandpaCall<HeaderOf<C>>;
/// Substrate-based chain that is using direct GRANDPA finality.
///
/// Keep in mind that parachains are relying on relay chain GRANDPA, so they should not implement
/// this trait.
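///
/// A hedged example implementation for a hypothetical `MyChain` (the values mirror the
/// `TestChain` used in the tests below and are illustrative only):
///
/// ```ignore
/// impl ChainWithGrandpa for MyChain {
///     const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = "BridgeMyChainGrandpa";
///     const MAX_AUTHORITIES_COUNT: u32 = 128;
///     const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 2;
///     const MAX_MANDATORY_HEADER_SIZE: u32 = 100_000;
///     const AVERAGE_HEADER_SIZE: u32 = 1_024;
/// }
/// ```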
pub trait ChainWithGrandpa: Chain {
/// Name of the bridge GRANDPA pallet (used in `construct_runtime` macro call) that is deployed
/// at some other chain to bridge with this `ChainWithGrandpa`.
///
/// We assume that all chains that are bridging with this `ChainWithGrandpa` are using
/// the same name.
const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str;
/// Max number of GRANDPA authorities at the chain.
///
/// This is a strict constant. If the bridged chain has more authorities than that,
/// the GRANDPA bridge pallet may halt.
const MAX_AUTHORITIES_COUNT: u32;
/// Max reasonable number of headers in the `votes_ancestries` vector of the GRANDPA justification.
///
/// This isn't a strict limit. The relay may submit justifications with more headers in their
/// ancestry and the pallet will accept such justifications. The limit is only used to compute
/// the maximal refund amount; submitting justifications which exceed the limit may be costly
/// to the submitter.
const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32;
/// Maximal size of the mandatory chain header. A mandatory header is a header that enacts a new
/// GRANDPA authority set (so it has a large digest inside).
///
/// This isn't a strict limit. The relay may submit larger headers and the pallet will accept
/// the call. The limit is only used to compute the maximal refund amount; making calls which
/// exceed the limit may be costly to the submitter.
const MAX_MANDATORY_HEADER_SIZE: u32;
/// Average size of the chain header. We don't expect to see headers that change the GRANDPA
/// authority set here (GRANDPA will probably be able to finalize at least one additional header
/// per session on non-test chains), so this is the average size of headers that aren't changing
/// the set.
///
/// This isn't a strict limit. The relay may submit justifications with larger headers and the
/// pallet will accept the call. However, if the total size of all `submit_finality_proof`
/// arguments exceeds the maximal size computed using this average size, the relayer will only get
/// a partial refund.
///
/// We expect some headers on production chains to be above this size. But they are rare and,
/// if a relayer cares about its profitability, we expect it'll select other headers for
/// submission.
const AVERAGE_HEADER_SIZE: u32;
}
impl<T> ChainWithGrandpa for T
where
T: Chain + UnderlyingChainProvider,
T::Chain: ChainWithGrandpa,
{
const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str =
<T::Chain as ChainWithGrandpa>::WITH_CHAIN_GRANDPA_PALLET_NAME;
const MAX_AUTHORITIES_COUNT: u32 = <T::Chain as ChainWithGrandpa>::MAX_AUTHORITIES_COUNT;
const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 =
<T::Chain as ChainWithGrandpa>::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY;
const MAX_MANDATORY_HEADER_SIZE: u32 =
<T::Chain as ChainWithGrandpa>::MAX_MANDATORY_HEADER_SIZE;
const AVERAGE_HEADER_SIZE: u32 = <T::Chain as ChainWithGrandpa>::AVERAGE_HEADER_SIZE;
}
/// Returns maximal expected size of `submit_finality_proof` call arguments.
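///
/// A hedged usage sketch for some chain `C: ChainWithGrandpa` (the precommits count below, two
/// thirds of the authorities plus one, is an assumption about the caller, not a requirement of
/// this function):
///
/// ```ignore
/// let size = max_expected_submit_finality_proof_arguments_size::<C>(
///     false,
///     C::MAX_AUTHORITIES_COUNT * 2 / 3 + 1,
/// );
/// ```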
pub fn max_expected_submit_finality_proof_arguments_size<C: ChainWithGrandpa>(
is_mandatory_finality_target: bool,
precommits: u32,
) -> u32 {
let max_expected_justification_size =
GrandpaJustification::<HeaderOf<C>>::max_reasonable_size::<C>(precommits);
// call arguments are header and justification
let max_expected_finality_target_size = if is_mandatory_finality_target {
C::MAX_MANDATORY_HEADER_SIZE
} else {
C::AVERAGE_HEADER_SIZE
};
max_expected_finality_target_size.saturating_add(max_expected_justification_size)
}
#[cfg(test)]
mod tests {
use super::*;
use bp_runtime::ChainId;
use frame_support::weights::Weight;
use sp_runtime::{testing::H256, traits::BlakeTwo256, MultiSignature};
struct TestChain;
impl Chain for TestChain {
const ID: ChainId = *b"test";
type BlockNumber = u32;
type Hash = H256;
type Hasher = BlakeTwo256;
type Header = sp_runtime::generic::Header<u32, BlakeTwo256>;
type AccountId = u64;
type Balance = u64;
type Nonce = u64;
type Signature = MultiSignature;
fn max_extrinsic_size() -> u32 {
0
}
fn max_extrinsic_weight() -> Weight {
Weight::zero()
}
}
impl ChainWithGrandpa for TestChain {
const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = "Test";
const MAX_AUTHORITIES_COUNT: u32 = 128;
const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 2;
const MAX_MANDATORY_HEADER_SIZE: u32 = 100_000;
const AVERAGE_HEADER_SIZE: u32 = 1_024;
}
#[test]
fn max_expected_submit_finality_proof_arguments_size_respects_mandatory_argument() {
assert!(
max_expected_submit_finality_proof_arguments_size::<TestChain>(true, 100) >
max_expected_submit_finality_proof_arguments_size::<TestChain>(false, 100),
);
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Storage keys of bridge GRANDPA pallet.
/// Name of the `PalletOperatingMode` storage value.
pub const PALLET_OPERATING_MODE_VALUE_NAME: &str = "PalletOperatingMode";
/// Name of the `BestFinalized` storage value.
pub const BEST_FINALIZED_VALUE_NAME: &str = "BestFinalized";
/// Name of the `CurrentAuthoritySet` storage value.
pub const CURRENT_AUTHORITY_SET_VALUE_NAME: &str = "CurrentAuthoritySet";
use sp_core::storage::StorageKey;
/// Storage key of the `PalletOperatingMode` variable in the runtime storage.
pub fn pallet_operating_mode_key(pallet_prefix: &str) -> StorageKey {
StorageKey(
bp_runtime::storage_value_final_key(
pallet_prefix.as_bytes(),
PALLET_OPERATING_MODE_VALUE_NAME.as_bytes(),
)
.to_vec(),
)
}
/// Storage key of the `CurrentAuthoritySet` variable in the runtime storage.
pub fn current_authority_set_key(pallet_prefix: &str) -> StorageKey {
StorageKey(
bp_runtime::storage_value_final_key(
pallet_prefix.as_bytes(),
CURRENT_AUTHORITY_SET_VALUE_NAME.as_bytes(),
)
.to_vec(),
)
}
/// Storage key of the best finalized header number and hash value in the runtime storage.
pub fn best_finalized_key(pallet_prefix: &str) -> StorageKey {
StorageKey(
bp_runtime::storage_value_final_key(
pallet_prefix.as_bytes(),
BEST_FINALIZED_VALUE_NAME.as_bytes(),
)
.to_vec(),
)
}
#[cfg(test)]
mod tests {
use super::*;
use hex_literal::hex;
#[test]
fn pallet_operating_mode_key_computed_properly() {
// If this test fails, then something has been changed in module storage that is breaking
// compatibility with previous pallet.
let storage_key = pallet_operating_mode_key("BridgeGrandpa").0;
assert_eq!(
storage_key,
hex!("0b06f475eddb98cf933a12262e0388de0f4cf0917788d791142ff6c1f216e7b3").to_vec(),
"Unexpected storage key: {}",
hex::encode(&storage_key),
);
}
#[test]
fn current_authority_set_key_computed_properly() {
// If this test fails, then something has been changed in module storage that is breaking
// compatibility with previous pallet.
let storage_key = current_authority_set_key("BridgeGrandpa").0;
assert_eq!(
storage_key,
hex!("0b06f475eddb98cf933a12262e0388de24a7b8b5717ea33346fa595a66ccbcb0").to_vec(),
"Unexpected storage key: {}",
hex::encode(&storage_key),
);
}
#[test]
fn best_finalized_key_computed_properly() {
// If this test fails, then something has been changed in module storage that is breaking
// compatibility with previous pallet.
let storage_key = best_finalized_key("BridgeGrandpa").0;
assert_eq!(
storage_key,
hex!("0b06f475eddb98cf933a12262e0388dea4ebafdd473c549fdb24c5c991c5591c").to_vec(),
"Unexpected storage key: {}",
hex::encode(&storage_key),
);
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Tests inside this module are made to ensure that our custom justification verification
//! implementation works similarly to [`finality_grandpa::validate_commit`] and explicitly
//! show where we behave differently.
//!
//! Some of the tests in this module may partially duplicate tests from `justification.rs`,
//! but their purpose is different.
use bp_header_chain::justification::{
verify_justification, GrandpaJustification, JustificationVerificationContext,
JustificationVerificationError, PrecommitError,
};
use bp_test_utils::{
header_id, make_justification_for_header, signed_precommit, test_header, Account,
JustificationGeneratorParams, ALICE, BOB, CHARLIE, DAVE, EVE, FERDIE, TEST_GRANDPA_SET_ID,
};
use finality_grandpa::voter_set::VoterSet;
use sp_consensus_grandpa::{AuthorityId, AuthorityWeight, SetId};
use sp_runtime::traits::Header as HeaderT;
type TestHeader = sp_runtime::testing::Header;
type TestHash = <TestHeader as HeaderT>::Hash;
type TestNumber = <TestHeader as HeaderT>::Number;
/// Implementation of `finality_grandpa::Chain` that is used in tests.
struct AncestryChain(bp_header_chain::justification::AncestryChain<TestHeader>);
impl AncestryChain {
fn new(justification: &GrandpaJustification<TestHeader>) -> Self {
Self(bp_header_chain::justification::AncestryChain::new(justification).0)
}
}
impl finality_grandpa::Chain<TestHash, TestNumber> for AncestryChain {
fn ancestry(
&self,
base: TestHash,
block: TestHash,
) -> Result<Vec<TestHash>, finality_grandpa::Error> {
let mut route = Vec::new();
let mut current_hash = block;
loop {
if current_hash == base {
break
}
match self.0.parent_hash_of(&current_hash) {
Some(parent_hash) => {
current_hash = *parent_hash;
route.push(current_hash);
},
_ => return Err(finality_grandpa::Error::NotDescendent),
}
}
route.pop(); // remove the base
Ok(route)
}
}
/// Get a full set of accounts.
fn full_accounts_set() -> Vec<(Account, AuthorityWeight)> {
vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1), (EVE, 1)]
}
/// Get a full set of GRANDPA authorities.
fn full_voter_set() -> VoterSet<AuthorityId> {
VoterSet::new(full_accounts_set().iter().map(|(id, w)| (AuthorityId::from(*id), *w))).unwrap()
}
pub fn full_verification_context(set_id: SetId) -> JustificationVerificationContext {
let voter_set = full_voter_set();
JustificationVerificationContext { voter_set, authority_set_id: set_id }
}
/// Get a minimal set of accounts.
fn minimal_accounts_set() -> Vec<(Account, AuthorityWeight)> {
// there are 5 accounts in the full set => we need 2/3 + 1 accounts, which results in 4 accounts
vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1)]
}
/// Make a valid GRANDPA justification with sensible defaults.
pub fn make_default_justification(header: &TestHeader) -> GrandpaJustification<TestHeader> {
make_justification_for_header(JustificationGeneratorParams {
header: header.clone(),
authorities: minimal_accounts_set(),
..Default::default()
})
}
// the `finality_grandpa::validate_commit` function has two ways to report an unsuccessful
// commit validation:
//
// 1) to return `Err()` (which only may happen if `finality_grandpa::Chain` implementation returns
// an error);
// 2) to return `Ok(validation_result)` if `validation_result.is_valid()` is false.
//
// Our implementation just returns an error in both cases.
#[test]
fn same_result_when_precommit_target_has_lower_number_than_commit_target() {
let mut justification = make_default_justification(&test_header(1));
// the number of header in precommit (0) is lower than number of header in commit (1)
justification.commit.precommits[0].precommit.target_number = 0;
// our implementation returns an error
assert_eq!(
verify_justification::<TestHeader>(
header_id::<TestHeader>(1),
&full_verification_context(TEST_GRANDPA_SET_ID),
&justification,
),
Err(JustificationVerificationError::Precommit(PrecommitError::UnrelatedAncestryVote)),
);
// original implementation returns `Ok(validation_result)`
// with `validation_result.is_valid() == false`.
let result = finality_grandpa::validate_commit(
&justification.commit,
&full_voter_set(),
&AncestryChain::new(&justification),
)
.unwrap();
assert!(!result.is_valid());
}
#[test]
fn same_result_when_precommit_target_is_not_descendant_of_commit_target() {
let not_descendant = test_header::<TestHeader>(10);
let mut justification = make_default_justification(&test_header(1));
// the route from header of commit (1) to header of precommit (10) is missing from
// the votes ancestries
justification.commit.precommits[0].precommit.target_number = *not_descendant.number();
justification.commit.precommits[0].precommit.target_hash = not_descendant.hash();
justification.votes_ancestries.push(not_descendant);
// our implementation returns an error
assert_eq!(
verify_justification::<TestHeader>(
header_id::<TestHeader>(1),
&full_verification_context(TEST_GRANDPA_SET_ID),
&justification,
),
Err(JustificationVerificationError::Precommit(PrecommitError::UnrelatedAncestryVote)),
);
// original implementation returns `Ok(validation_result)`
// with `validation_result.is_valid() == false`.
let result = finality_grandpa::validate_commit(
&justification.commit,
&full_voter_set(),
&AncestryChain::new(&justification),
)
.unwrap();
assert!(!result.is_valid());
}
#[test]
fn same_result_when_there_are_not_enough_cumulative_weight_to_finalize_commit_target() {
// just remove one authority from the minimal set and we shall not reach the threshold
let mut authorities_set = minimal_accounts_set();
authorities_set.pop();
let justification = make_justification_for_header(JustificationGeneratorParams {
header: test_header(1),
authorities: authorities_set,
..Default::default()
});
// our implementation returns an error
assert_eq!(
verify_justification::<TestHeader>(
header_id::<TestHeader>(1),
&full_verification_context(TEST_GRANDPA_SET_ID),
&justification,
),
Err(JustificationVerificationError::TooLowCumulativeWeight),
);
// original implementation returns `Ok(validation_result)`
// with `validation_result.is_valid() == false`.
let result = finality_grandpa::validate_commit(
&justification.commit,
&full_voter_set(),
&AncestryChain::new(&justification),
)
.unwrap();
assert!(!result.is_valid());
}
// the tests below show where we differ from the original implementation
#[test]
fn different_result_when_justification_contains_duplicate_vote() {
let mut justification = make_justification_for_header(JustificationGeneratorParams {
header: test_header(1),
authorities: minimal_accounts_set(),
ancestors: 0,
..Default::default()
});
// the justification may contain exactly the same vote (i.e. same precommit and same signature)
// multiple times and it isn't treated as an error by the original implementation
let last_precommit = justification.commit.precommits.pop().unwrap();
justification.commit.precommits.push(justification.commit.precommits[0].clone());
justification.commit.precommits.push(last_precommit);
// our implementation fails
assert_eq!(
verify_justification::<TestHeader>(
header_id::<TestHeader>(1),
&full_verification_context(TEST_GRANDPA_SET_ID),
&justification,
),
Err(JustificationVerificationError::Precommit(PrecommitError::DuplicateAuthorityVote)),
);
// original implementation returns `Ok(validation_result)`
// with `validation_result.is_valid() == true`.
let result = finality_grandpa::validate_commit(
&justification.commit,
&full_voter_set(),
&AncestryChain::new(&justification),
)
.unwrap();
assert!(result.is_valid());
}
#[test]
fn different_results_when_authority_equivocates_once_in_a_round() {
let mut justification = make_justification_for_header(JustificationGeneratorParams {
header: test_header(1),
authorities: minimal_accounts_set(),
ancestors: 0,
..Default::default()
});
// the original implementation allows an authority to submit two different
// votes in a single round, of which only the first is 'accepted'
let last_precommit = justification.commit.precommits.pop().unwrap();
justification.commit.precommits.push(signed_precommit::<TestHeader>(
&ALICE,
header_id::<TestHeader>(1),
justification.round,
TEST_GRANDPA_SET_ID,
));
justification.commit.precommits.push(last_precommit);
// our implementation fails
assert_eq!(
verify_justification::<TestHeader>(
header_id::<TestHeader>(1),
&full_verification_context(TEST_GRANDPA_SET_ID),
&justification,
),
Err(JustificationVerificationError::Precommit(PrecommitError::DuplicateAuthorityVote)),
);
// original implementation returns `Ok(validation_result)`
// with `validation_result.is_valid() == true`.
let result = finality_grandpa::validate_commit(
&justification.commit,
&full_voter_set(),
&AncestryChain::new(&justification),
)
.unwrap();
assert!(result.is_valid());
}
#[test]
fn different_results_when_authority_equivocates_twice_in_a_round() {
let mut justification = make_justification_for_header(JustificationGeneratorParams {
header: test_header(1),
authorities: minimal_accounts_set(),
ancestors: 0,
..Default::default()
});
// there's some code in the original implementation that should return an error when the
// same authority submits more than two different votes in a single round:
// https://github.com/paritytech/finality-grandpa/blob/6aeea2d1159d0f418f0b86e70739f2130629ca09/src/lib.rs#L473
// but there's also code that prevents this from happening:
// https://github.com/paritytech/finality-grandpa/blob/6aeea2d1159d0f418f0b86e70739f2130629ca09/src/round.rs#L287
// => so we too just ignore all votes from the same authority, except the first one
let last_precommit = justification.commit.precommits.pop().unwrap();
let prev_last_precommit = justification.commit.precommits.pop().unwrap();
justification.commit.precommits.push(signed_precommit::<TestHeader>(
&ALICE,
header_id::<TestHeader>(1),
justification.round,
TEST_GRANDPA_SET_ID,
));
justification.commit.precommits.push(signed_precommit::<TestHeader>(
&ALICE,
header_id::<TestHeader>(1),
justification.round,
TEST_GRANDPA_SET_ID,
));
justification.commit.precommits.push(last_precommit);
justification.commit.precommits.push(prev_last_precommit);
// our implementation fails
assert_eq!(
verify_justification::<TestHeader>(
header_id::<TestHeader>(1),
&full_verification_context(TEST_GRANDPA_SET_ID),
&justification,
),
Err(JustificationVerificationError::Precommit(PrecommitError::DuplicateAuthorityVote)),
);
// original implementation returns `Ok(validation_result)`
// with `validation_result.is_valid() == true`.
let result = finality_grandpa::validate_commit(
&justification.commit,
&full_voter_set(),
&AncestryChain::new(&justification),
)
.unwrap();
assert!(result.is_valid());
}
#[test]
fn different_results_when_there_are_more_than_enough_votes() {
let mut justification = make_justification_for_header(JustificationGeneratorParams {
header: test_header(1),
authorities: minimal_accounts_set(),
ancestors: 0,
..Default::default()
});
// the reference implementation just keeps verifying signatures even after we have
// collected enough votes. We do not
justification.commit.precommits.push(signed_precommit::<TestHeader>(
&EVE,
header_id::<TestHeader>(1),
justification.round,
TEST_GRANDPA_SET_ID,
));
// our implementation fails
assert_eq!(
verify_justification::<TestHeader>(
header_id::<TestHeader>(1),
&full_verification_context(TEST_GRANDPA_SET_ID),
&justification,
),
Err(JustificationVerificationError::Precommit(PrecommitError::RedundantAuthorityVote)),
);
// original implementation returns `Ok(validation_result)`
// with `validation_result.is_valid() == true`.
let result = finality_grandpa::validate_commit(
&justification.commit,
&full_voter_set(),
&AncestryChain::new(&justification),
)
.unwrap();
assert!(result.is_valid());
}
#[test]
fn different_results_when_there_is_a_vote_of_unknown_authority() {
let mut justification = make_justification_for_header(JustificationGeneratorParams {
header: test_header(1),
authorities: minimal_accounts_set(),
ancestors: 0,
..Default::default()
});
// the reference implementation simply ignores the vote of an unknown authority (the
// remaining votes are still enough to finalize the target). We reject such justifications
let last_precommit = justification.commit.precommits.pop().unwrap();
justification.commit.precommits.push(signed_precommit::<TestHeader>(
&FERDIE,
header_id::<TestHeader>(1),
justification.round,
TEST_GRANDPA_SET_ID,
));
justification.commit.precommits.push(last_precommit);
// our implementation fails
assert_eq!(
verify_justification::<TestHeader>(
header_id::<TestHeader>(1),
&full_verification_context(TEST_GRANDPA_SET_ID),
&justification,
),
Err(JustificationVerificationError::Precommit(PrecommitError::UnknownAuthorityVote)),
);
// original implementation returns `Ok(validation_result)`
// with `validation_result.is_valid() == true`.
let result = finality_grandpa::validate_commit(
&justification.commit,
&full_voter_set(),
&AncestryChain::new(&justification),
)
.unwrap();
assert!(result.is_valid());
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Tests for Grandpa equivocations collector code.
use bp_header_chain::justification::EquivocationsCollector;
use bp_test_utils::*;
use finality_grandpa::Precommit;
use sp_consensus_grandpa::EquivocationProof;
type TestHeader = sp_runtime::testing::Header;
#[test]
fn duplicate_votes_are_not_considered_equivocations() {
let verification_context = verification_context(TEST_GRANDPA_SET_ID);
let base_justification = make_default_justification::<TestHeader>(&test_header(1));
let mut collector =
EquivocationsCollector::new(&verification_context, &base_justification).unwrap();
collector.parse_justifications(&[base_justification.clone()]);
assert_eq!(collector.into_equivocation_proofs().len(), 0);
}
#[test]
fn equivocations_are_detected_in_base_justification_redundant_votes() {
let mut base_justification = make_default_justification::<TestHeader>(&test_header(1));
let first_vote = base_justification.commit.precommits[0].clone();
let equivocation = signed_precommit::<TestHeader>(
&ALICE,
header_id::<TestHeader>(1),
base_justification.round,
TEST_GRANDPA_SET_ID,
);
base_justification.commit.precommits.push(equivocation.clone());
let verification_context = verification_context(TEST_GRANDPA_SET_ID);
let collector =
EquivocationsCollector::new(&verification_context, &base_justification).unwrap();
assert_eq!(
collector.into_equivocation_proofs(),
vec![EquivocationProof::new(
1,
sp_consensus_grandpa::Equivocation::Precommit(finality_grandpa::Equivocation {
round_number: 1,
identity: ALICE.into(),
first: (
Precommit {
target_hash: first_vote.precommit.target_hash,
target_number: first_vote.precommit.target_number
},
first_vote.signature
),
second: (
Precommit {
target_hash: equivocation.precommit.target_hash,
target_number: equivocation.precommit.target_number
},
equivocation.signature
)
})
)]
);
}
#[test]
fn equivocations_are_detected_in_extra_justification_redundant_votes() {
let base_justification = make_default_justification::<TestHeader>(&test_header(1));
let first_vote = base_justification.commit.precommits[0].clone();
let mut extra_justification = base_justification.clone();
let equivocation = signed_precommit::<TestHeader>(
&ALICE,
header_id::<TestHeader>(1),
base_justification.round,
TEST_GRANDPA_SET_ID,
);
extra_justification.commit.precommits.push(equivocation.clone());
let verification_context = verification_context(TEST_GRANDPA_SET_ID);
let mut collector =
EquivocationsCollector::new(&verification_context, &base_justification).unwrap();
collector.parse_justifications(&[extra_justification]);
assert_eq!(
collector.into_equivocation_proofs(),
vec![EquivocationProof::new(
1,
sp_consensus_grandpa::Equivocation::Precommit(finality_grandpa::Equivocation {
round_number: 1,
identity: ALICE.into(),
first: (
Precommit {
target_hash: first_vote.precommit.target_hash,
target_number: first_vote.precommit.target_number
},
first_vote.signature
),
second: (
Precommit {
target_hash: equivocation.precommit.target_hash,
target_number: equivocation.precommit.target_number
},
equivocation.signature
)
})
)]
);
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Tests for Grandpa Justification optimizer code.
use bp_header_chain::justification::verify_and_optimize_justification;
use bp_test_utils::*;
use finality_grandpa::SignedPrecommit;
use sp_consensus_grandpa::AuthoritySignature;
type TestHeader = sp_runtime::testing::Header;
#[test]
fn optimizer_does_nothing_with_minimal_justification() {
let mut justification = make_default_justification::<TestHeader>(&test_header(1));
let num_precommits_before = justification.commit.precommits.len();
verify_and_optimize_justification::<TestHeader>(
header_id::<TestHeader>(1),
&verification_context(TEST_GRANDPA_SET_ID),
&mut justification,
)
.unwrap();
let num_precommits_after = justification.commit.precommits.len();
assert_eq!(num_precommits_before, num_precommits_after);
}
#[test]
fn unknown_authority_votes_are_removed_by_optimizer() {
let mut justification = make_default_justification::<TestHeader>(&test_header(1));
justification.commit.precommits.push(signed_precommit::<TestHeader>(
&bp_test_utils::Account(42),
header_id::<TestHeader>(1),
justification.round,
TEST_GRANDPA_SET_ID,
));
let num_precommits_before = justification.commit.precommits.len();
verify_and_optimize_justification::<TestHeader>(
header_id::<TestHeader>(1),
&verification_context(TEST_GRANDPA_SET_ID),
&mut justification,
)
.unwrap();
let num_precommits_after = justification.commit.precommits.len();
assert_eq!(num_precommits_before - 1, num_precommits_after);
}
#[test]
fn duplicate_authority_votes_are_removed_by_optimizer() {
let mut justification = make_default_justification::<TestHeader>(&test_header(1));
justification
.commit
.precommits
.push(justification.commit.precommits.first().cloned().unwrap());
let num_precommits_before = justification.commit.precommits.len();
verify_and_optimize_justification::<TestHeader>(
header_id::<TestHeader>(1),
&verification_context(TEST_GRANDPA_SET_ID),
&mut justification,
)
.unwrap();
let num_precommits_after = justification.commit.precommits.len();
assert_eq!(num_precommits_before - 1, num_precommits_after);
}
#[test]
fn invalid_authority_signatures_are_removed_by_optimizer() {
let mut justification = make_default_justification::<TestHeader>(&test_header(1));
let target = header_id::<TestHeader>(1);
let invalid_raw_signature: Vec<u8> = ALICE.sign(b"").to_bytes().into();
justification.commit.precommits.insert(
0,
SignedPrecommit {
precommit: finality_grandpa::Precommit {
target_hash: target.0,
target_number: target.1,
},
signature: AuthoritySignature::try_from(invalid_raw_signature).unwrap(),
id: ALICE.into(),
},
);
let num_precommits_before = justification.commit.precommits.len();
verify_and_optimize_justification::<TestHeader>(
header_id::<TestHeader>(1),
&verification_context(TEST_GRANDPA_SET_ID),
&mut justification,
)
.unwrap();
let num_precommits_after = justification.commit.precommits.len();
assert_eq!(num_precommits_before - 1, num_precommits_after);
}
#[test]
fn redundant_authority_votes_are_removed_by_optimizer() {
let mut justification = make_default_justification::<TestHeader>(&test_header(1));
justification.commit.precommits.push(signed_precommit::<TestHeader>(
&EVE,
header_id::<TestHeader>(1),
justification.round,
TEST_GRANDPA_SET_ID,
));
let num_precommits_before = justification.commit.precommits.len();
verify_and_optimize_justification::<TestHeader>(
header_id::<TestHeader>(1),
&verification_context(TEST_GRANDPA_SET_ID),
&mut justification,
)
.unwrap();
let num_precommits_after = justification.commit.precommits.len();
assert_eq!(num_precommits_before - 1, num_precommits_after);
}
#[test]
fn unrelated_ancestry_votes_are_removed_by_optimizer() {
let mut justification = make_default_justification::<TestHeader>(&test_header(2));
justification.commit.precommits.insert(
0,
signed_precommit::<TestHeader>(
&ALICE,
header_id::<TestHeader>(1),
justification.round,
TEST_GRANDPA_SET_ID,
),
);
let num_precommits_before = justification.commit.precommits.len();
verify_and_optimize_justification::<TestHeader>(
header_id::<TestHeader>(2),
&verification_context(TEST_GRANDPA_SET_ID),
&mut justification,
)
.unwrap();
let num_precommits_after = justification.commit.precommits.len();
assert_eq!(num_precommits_before - 1, num_precommits_after);
}
#[test]
fn duplicate_votes_ancestries_are_removed_by_optimizer() {
let mut justification = make_default_justification::<TestHeader>(&test_header(1));
let optimized_votes_ancestries = justification.votes_ancestries.clone();
justification.votes_ancestries = justification
.votes_ancestries
.into_iter()
.flat_map(|item| std::iter::repeat(item).take(3))
.collect();
verify_and_optimize_justification::<TestHeader>(
header_id::<TestHeader>(1),
&verification_context(TEST_GRANDPA_SET_ID),
&mut justification,
)
.unwrap();
assert_eq!(justification.votes_ancestries, optimized_votes_ancestries);
}
#[test]
fn redundant_votes_ancestries_are_removed_by_optimizer() {
let mut justification = make_default_justification::<TestHeader>(&test_header(1));
justification.votes_ancestries.push(test_header(100));
let num_votes_ancestries_before = justification.votes_ancestries.len();
verify_and_optimize_justification::<TestHeader>(
header_id::<TestHeader>(1),
&verification_context(TEST_GRANDPA_SET_ID),
&mut justification,
)
.unwrap();
let num_votes_ancestries_after = justification.votes_ancestries.len();
assert_eq!(num_votes_ancestries_before - 1, num_votes_ancestries_after);
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Tests for Grandpa strict justification verifier code.
use bp_header_chain::justification::{
required_justification_precommits, verify_justification, JustificationVerificationContext,
JustificationVerificationError, PrecommitError,
};
use bp_test_utils::*;
type TestHeader = sp_runtime::testing::Header;
#[test]
fn valid_justification_accepted() {
let authorities = vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1)];
let params = JustificationGeneratorParams {
header: test_header(1),
round: TEST_GRANDPA_ROUND,
set_id: TEST_GRANDPA_SET_ID,
authorities: authorities.clone(),
ancestors: 7,
forks: 3,
};
let justification = make_justification_for_header::<TestHeader>(params.clone());
assert_eq!(
verify_justification::<TestHeader>(
header_id::<TestHeader>(1),
&verification_context(TEST_GRANDPA_SET_ID),
&justification,
),
Ok(()),
);
assert_eq!(justification.commit.precommits.len(), authorities.len());
assert_eq!(justification.votes_ancestries.len(), params.ancestors as usize);
}
#[test]
fn valid_justification_accepted_with_single_fork() {
let params = JustificationGeneratorParams {
header: test_header(1),
round: TEST_GRANDPA_ROUND,
set_id: TEST_GRANDPA_SET_ID,
authorities: vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1)],
ancestors: 5,
forks: 1,
};
assert_eq!(
verify_justification::<TestHeader>(
header_id::<TestHeader>(1),
&verification_context(TEST_GRANDPA_SET_ID),
&make_justification_for_header::<TestHeader>(params)
),
Ok(()),
);
}
#[test]
fn valid_justification_accepted_with_arbitrary_number_of_authorities() {
use finality_grandpa::voter_set::VoterSet;
use sp_consensus_grandpa::AuthorityId;
let n = 15;
let required_signatures = required_justification_precommits(n as _);
let authorities = accounts(n).iter().map(|k| (*k, 1)).collect::<Vec<_>>();
let params = JustificationGeneratorParams {
header: test_header(1),
round: TEST_GRANDPA_ROUND,
set_id: TEST_GRANDPA_SET_ID,
authorities: authorities.clone().into_iter().take(required_signatures as _).collect(),
ancestors: n.into(),
forks: required_signatures,
};
let authorities = authorities
.iter()
.map(|(id, w)| (AuthorityId::from(*id), *w))
.collect::<Vec<(AuthorityId, _)>>();
let voter_set = VoterSet::new(authorities).unwrap();
assert_eq!(
verify_justification::<TestHeader>(
header_id::<TestHeader>(1),
&JustificationVerificationContext { voter_set, authority_set_id: TEST_GRANDPA_SET_ID },
&make_justification_for_header::<TestHeader>(params)
),
Ok(()),
);
}
#[test]
fn justification_with_invalid_target_rejected() {
assert_eq!(
verify_justification::<TestHeader>(
header_id::<TestHeader>(2),
&verification_context(TEST_GRANDPA_SET_ID),
&make_default_justification::<TestHeader>(&test_header(1)),
),
Err(JustificationVerificationError::InvalidJustificationTarget),
);
}
#[test]
fn justification_with_invalid_commit_rejected() {
let mut justification = make_default_justification::<TestHeader>(&test_header(1));
justification.commit.precommits.clear();
assert_eq!(
verify_justification::<TestHeader>(
header_id::<TestHeader>(1),
&verification_context(TEST_GRANDPA_SET_ID),
&justification,
),
Err(JustificationVerificationError::TooLowCumulativeWeight),
);
}
#[test]
fn justification_with_invalid_authority_signature_rejected() {
let mut justification = make_default_justification::<TestHeader>(&test_header(1));
justification.commit.precommits[0].signature =
sp_core::crypto::UncheckedFrom::unchecked_from([1u8; 64]);
assert_eq!(
verify_justification::<TestHeader>(
header_id::<TestHeader>(1),
&verification_context(TEST_GRANDPA_SET_ID),
&justification,
),
Err(JustificationVerificationError::Precommit(PrecommitError::InvalidAuthoritySignature)),
);
}
#[test]
fn justification_with_duplicate_votes_ancestry() {
let mut justification = make_default_justification::<TestHeader>(&test_header(1));
justification.votes_ancestries.push(justification.votes_ancestries[0].clone());
assert_eq!(
verify_justification::<TestHeader>(
header_id::<TestHeader>(1),
&verification_context(TEST_GRANDPA_SET_ID),
&justification,
),
Err(JustificationVerificationError::DuplicateVotesAncestries),
);
}
#[test]
fn justification_with_redundant_votes_ancestry() {
let mut justification = make_default_justification::<TestHeader>(&test_header(1));
justification.votes_ancestries.push(test_header(10));
assert_eq!(
verify_justification::<TestHeader>(
header_id::<TestHeader>(1),
&verification_context(TEST_GRANDPA_SET_ID),
&justification,
),
Err(JustificationVerificationError::RedundantVotesAncestries),
);
}
#[test]
fn justification_is_invalid_if_we_dont_meet_threshold() {
// Need at least three authorities to sign off or else the voter set threshold can't be reached
let authorities = vec![(ALICE, 1), (BOB, 1)];
let params = JustificationGeneratorParams {
header: test_header(1),
round: TEST_GRANDPA_ROUND,
set_id: TEST_GRANDPA_SET_ID,
authorities: authorities.clone(),
ancestors: 2 * authorities.len() as u32,
forks: 2,
};
assert_eq!(
verify_justification::<TestHeader>(
header_id::<TestHeader>(1),
&verification_context(TEST_GRANDPA_SET_ID),
&make_justification_for_header::<TestHeader>(params)
),
Err(JustificationVerificationError::TooLowCumulativeWeight),
);
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
mod justification {
mod equivocation;
mod optimizer;
mod strict;
}
mod implementation_match;
[package]
name = "bp-messages"
description = "Primitives of messages module."
version = "0.7.0"
authors.workspace = true
edition.workspace = true
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
[lints]
workspace = true
[dependencies]
codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] }
scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] }
serde = { features = ["alloc", "derive"], workspace = true }
# Bridge dependencies
bp-runtime = { path = "../runtime", default-features = false }
bp-header-chain = { path = "../header-chain", default-features = false }
# Substrate Dependencies
frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
[dev-dependencies]
hex = "0.4"
hex-literal = "0.4"
[features]
default = ["std"]
std = [
"bp-header-chain/std",
"bp-runtime/std",
"codec/std",
"frame-support/std",
"scale-info/std",
"serde/std",
"sp-core/std",
"sp-std/std",
]
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Primitives of messages module.
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
use bp_header_chain::HeaderChainError;
use bp_runtime::{
messages::MessageDispatchResult, BasicOperatingMode, Chain, OperatingMode, RangeInclusiveExt,
StorageProofError, UnderlyingChainOf, UnderlyingChainProvider,
};
use codec::{Decode, Encode, MaxEncodedLen};
use frame_support::PalletError;
// Weight is reexported to avoid additional frame-support dependencies in related crates.
pub use frame_support::weights::Weight;
use scale_info::TypeInfo;
use serde::{Deserialize, Serialize};
use source_chain::RelayersRewards;
use sp_core::{RuntimeDebug, TypeId};
use sp_std::{collections::vec_deque::VecDeque, ops::RangeInclusive, prelude::*};
pub mod source_chain;
pub mod storage_keys;
pub mod target_chain;
/// Substrate-based chain with messaging support.
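///
/// A hedged example implementation for a hypothetical `MyChain` (the constant values are
/// illustrative, not normative):
///
/// ```ignore
/// impl ChainWithMessages for MyChain {
///     const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = "BridgeMyChainMessages";
///     const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 1024;
///     const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 4096;
/// }
/// ```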
pub trait ChainWithMessages: Chain {
/// Name of the bridge messages pallet (used in `construct_runtime` macro call) that is
/// deployed at some other chain to bridge with this `ChainWithMessages`.
///
/// We assume that all chains that are bridging with this `ChainWithMessages` are using
/// the same name.
const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str;
/// Maximal number of unrewarded relayers in a single confirmation transaction at this
/// `ChainWithMessages`.
const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce;
/// Maximal number of unconfirmed messages in a single confirmation transaction at this
/// `ChainWithMessages`.
const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce;
}
impl<T> ChainWithMessages for T
where
T: Chain + UnderlyingChainProvider,
UnderlyingChainOf<T>: ChainWithMessages,
{
const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str =
UnderlyingChainOf::<T>::WITH_CHAIN_MESSAGES_PALLET_NAME;
const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce =
UnderlyingChainOf::<T>::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX;
const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce =
UnderlyingChainOf::<T>::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX;
}
/// Messages pallet operating mode.
#[derive(
Encode,
Decode,
Clone,
Copy,
PartialEq,
Eq,
RuntimeDebug,
TypeInfo,
MaxEncodedLen,
Serialize,
Deserialize,
)]
pub enum MessagesOperatingMode {
/// Basic operating mode (Normal/Halted)
Basic(BasicOperatingMode),
/// The pallet is not accepting outbound messages. Inbound messages and receiving proofs
/// are still accepted.
///
/// This mode may be used e.g. when the bridged chain expects an upgrade. Then, to avoid dispatch
/// failures, the pallet owner may stop accepting new messages, while continuing to deliver
/// queued messages to the bridged chain. Once the upgrade is completed, the mode may be switched
/// back to `Normal`.
RejectingOutboundMessages,
}
impl Default for MessagesOperatingMode {
fn default() -> Self {
MessagesOperatingMode::Basic(BasicOperatingMode::Normal)
}
}
impl OperatingMode for MessagesOperatingMode {
fn is_halted(&self) -> bool {
match self {
Self::Basic(operating_mode) => operating_mode.is_halted(),
_ => false,
}
}
}
/// Lane id which implements `TypeId`.
#[derive(
Clone, Copy, Decode, Default, Encode, Eq, Ord, PartialOrd, PartialEq, TypeInfo, MaxEncodedLen,
)]
pub struct LaneId(pub [u8; 4]);
impl core::fmt::Debug for LaneId {
fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result {
self.0.fmt(fmt)
}
}
impl AsRef<[u8]> for LaneId {
fn as_ref(&self) -> &[u8] {
&self.0
}
}
impl TypeId for LaneId {
const TYPE_ID: [u8; 4] = *b"blan";
}
/// Message nonce. Valid messages will never have 0 nonce.
pub type MessageNonce = u64;
/// Message id as a tuple.
pub type BridgeMessageId = (LaneId, MessageNonce);
/// Opaque message payload. We only decode this payload when it is dispatched.
pub type MessagePayload = Vec<u8>;
/// Message key (unique message identifier) as it is stored in the storage.
#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
pub struct MessageKey {
/// ID of the message lane.
pub lane_id: LaneId,
/// Message nonce.
pub nonce: MessageNonce,
}
/// Message as it is stored in the storage.
#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)]
pub struct Message {
/// Message key.
pub key: MessageKey,
/// Message payload.
pub payload: MessagePayload,
}
/// Inbound lane data.
#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)]
pub struct InboundLaneData<RelayerId> {
/// Identifiers of relayers and messages that they have delivered to this lane (ordered by
/// message nonce).
///
/// This serves as a helper storage item, to allow the source chain to easily pay rewards
/// to the relayers who successfully delivered messages to the target chain (inbound lane).
///
/// It is guaranteed to have at most N entries, where N is configured at the module level.
/// If there are N entries in this vec, then:
/// 1) all incoming messages are rejected if they're missing the corresponding
/// `proof-of(outbound-lane.state)`;
/// 2) all incoming messages are rejected if `proof-of(outbound-lane.state).last_delivered_nonce`
/// is equal to `self.last_confirmed_nonce`.
///
/// Given what is said above, all nonces in this queue are in the range:
/// `(self.last_confirmed_nonce; self.last_delivered_nonce()]`.
///
/// When a relayer sends a single message, both `MessageNonce`s are the same.
/// When a relayer sends messages in a batch, the first nonce is the lowest nonce in the batch
/// and the second nonce is the highest one. Multiple dispatches from the same relayer are allowed.
pub relayers: VecDeque<UnrewardedRelayer<RelayerId>>,
/// Nonce of the last message that
/// a) has been delivered to the target (this) chain, and
/// b) whose delivery has been confirmed on the source chain,
///
/// as far as the target chain knows.
///
/// This value is updated indirectly when an `OutboundLane` state of the source
/// chain is received alongside with new messages delivery.
pub last_confirmed_nonce: MessageNonce,
}
impl<RelayerId> Default for InboundLaneData<RelayerId> {
fn default() -> Self {
InboundLaneData { relayers: VecDeque::new(), last_confirmed_nonce: 0 }
}
}
impl<RelayerId> InboundLaneData<RelayerId> {
/// Returns approximate size of the struct, given a number of entries in the `relayers` set and
/// size of each entry.
///
/// Returns `None` if size overflows `usize` limits.
pub fn encoded_size_hint(relayers_entries: usize) -> Option<usize>
where
RelayerId: MaxEncodedLen,
{
relayers_entries
.checked_mul(UnrewardedRelayer::<RelayerId>::max_encoded_len())?
.checked_add(MessageNonce::max_encoded_len())
}
/// Returns the approximate size of the struct as u32, given a number of entries in the
/// `relayers` set and the size of each entry.
///
/// Returns `u32::MAX` if size overflows `u32` limits.
pub fn encoded_size_hint_u32(relayers_entries: usize) -> u32
where
RelayerId: MaxEncodedLen,
{
Self::encoded_size_hint(relayers_entries)
.and_then(|x| u32::try_from(x).ok())
.unwrap_or(u32::MAX)
}
/// Nonce of the last message that has been delivered to this (target) chain.
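///
/// An illustrative sketch (doc-test; relayer ids and nonces are example values): with no
/// unrewarded relayer entries the value falls back to `last_confirmed_nonce`, otherwise it is
/// the `end` nonce of the last entry.
///
/// ```
/// use bp_messages::{DeliveredMessages, InboundLaneData, UnrewardedRelayer};
///
/// let mut lane: InboundLaneData<u64> = InboundLaneData::default();
/// assert_eq!(lane.last_delivered_nonce(), 0);
///
/// lane.relayers.push_back(UnrewardedRelayer {
/// relayer: 42,
/// messages: DeliveredMessages { begin: 1, end: 5 },
/// });
/// assert_eq!(lane.last_delivered_nonce(), 5);
/// ```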
pub fn last_delivered_nonce(&self) -> MessageNonce {
self.relayers
.back()
.map(|entry| entry.messages.end)
.unwrap_or(self.last_confirmed_nonce)
}
/// Returns the total number of messages in the `relayers` vector,
/// saturating in case of underflow or overflow.
pub fn total_unrewarded_messages(&self) -> MessageNonce {
let relayers = &self.relayers;
match (relayers.front(), relayers.back()) {
(Some(front), Some(back)) =>
(front.messages.begin..=back.messages.end).saturating_len(),
_ => 0,
}
}
}
/// Outbound message details, returned by runtime APIs.
#[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq, TypeInfo)]
pub struct OutboundMessageDetails {
/// Nonce assigned to the message.
pub nonce: MessageNonce,
/// Message dispatch weight.
///
/// Depending on messages pallet configuration, it may be declared by the message submitter,
/// computed automatically or just be zero if dispatch fee is paid at the target chain.
pub dispatch_weight: Weight,
/// Size of the encoded message.
pub size: u32,
}
/// Inbound message details, returned by runtime APIs.
#[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq, TypeInfo)]
pub struct InboundMessageDetails {
/// Computed message dispatch weight.
///
/// Runtime API guarantees that it will match the value returned by
/// `target_chain::MessageDispatch::dispatch_weight`. This means that if the runtime
/// has failed to decode the message, it will be zero - that's because an undecodable
/// message cannot be dispatched.
pub dispatch_weight: Weight,
}
/// Unrewarded relayer entry stored in the inbound lane data.
///
/// This struct represents a continuous range of messages that have been delivered by the same
/// relayer and whose confirmations are still pending.
#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo, MaxEncodedLen)]
pub struct UnrewardedRelayer<RelayerId> {
/// Identifier of the relayer.
pub relayer: RelayerId,
/// Messages range, delivered by this relayer.
pub messages: DeliveredMessages,
}
/// Received messages with their dispatch result.
#[derive(Clone, Default, Encode, Decode, RuntimeDebug, PartialEq, Eq, TypeInfo)]
pub struct ReceivedMessages<DispatchLevelResult> {
/// Id of the lane which is receiving messages.
pub lane: LaneId,
/// Results of the messages that we have tried to dispatch.
pub receive_results: Vec<(MessageNonce, ReceivalResult<DispatchLevelResult>)>,
}
impl<DispatchLevelResult> ReceivedMessages<DispatchLevelResult> {
/// Creates new `ReceivedMessages` structure from given results.
pub fn new(
lane: LaneId,
receive_results: Vec<(MessageNonce, ReceivalResult<DispatchLevelResult>)>,
) -> Self {
ReceivedMessages { lane, receive_results }
}
/// Push `result` of the `message` delivery onto `receive_results` vector.
pub fn push(&mut self, message: MessageNonce, result: ReceivalResult<DispatchLevelResult>) {
self.receive_results.push((message, result));
}
}
/// Result of single message receival.
#[derive(RuntimeDebug, Encode, Decode, PartialEq, Eq, Clone, TypeInfo)]
pub enum ReceivalResult<DispatchLevelResult> {
/// Message has been received and dispatched. Note that we don't care whether dispatch has
/// been successful or not - in both cases the message falls into this category.
///
/// The message dispatch result is also returned.
Dispatched(MessageDispatchResult<DispatchLevelResult>),
/// Message has an invalid nonce and the lane has rejected it.
InvalidNonce,
/// There are too many unrewarded relayer entries at the lane.
TooManyUnrewardedRelayers,
/// There are too many unconfirmed messages at the lane.
TooManyUnconfirmedMessages,
}
/// Delivered messages with their dispatch result.
#[derive(Clone, Default, Encode, Decode, RuntimeDebug, PartialEq, Eq, TypeInfo, MaxEncodedLen)]
pub struct DeliveredMessages {
/// Nonce of the first message that has been delivered (inclusive).
pub begin: MessageNonce,
/// Nonce of the last message that has been delivered (inclusive).
pub end: MessageNonce,
}
impl DeliveredMessages {
/// Create a new `DeliveredMessages` struct that confirms delivery of a single nonce.
pub fn new(nonce: MessageNonce) -> Self {
DeliveredMessages { begin: nonce, end: nonce }
}
/// Return total count of delivered messages.
pub fn total_messages(&self) -> MessageNonce {
(self.begin..=self.end).saturating_len()
}
/// Note new dispatched message.
pub fn note_dispatched_message(&mut self) {
self.end += 1;
}
/// Returns true if delivered messages contain message with given nonce.
pub fn contains_message(&self, nonce: MessageNonce) -> bool {
(self.begin..=self.end).contains(&nonce)
}
}
/// Gist of `InboundLaneData::relayers` field used by runtime APIs.
#[derive(Clone, Default, Encode, Decode, RuntimeDebug, PartialEq, Eq, TypeInfo)]
pub struct UnrewardedRelayersState {
/// Number of entries in the `InboundLaneData::relayers` set.
pub unrewarded_relayer_entries: MessageNonce,
/// Number of messages in the oldest entry of `InboundLaneData::relayers`. This is the
/// minimal number of reward proofs required to push out this entry from the set.
pub messages_in_oldest_entry: MessageNonce,
/// Total number of messages in the relayers vector.
pub total_messages: MessageNonce,
/// Nonce of the latest message that has been delivered to the target chain.
///
/// This corresponds to the result of the `InboundLaneData::last_delivered_nonce` call
/// at the bridged chain.
pub last_delivered_nonce: MessageNonce,
}
impl UnrewardedRelayersState {
/// Verify that the relayers state corresponds to the given `InboundLaneData`.
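///
/// An illustrative sketch (doc-test; the relayer id and nonces are example values):
///
/// ```
/// use bp_messages::{DeliveredMessages, InboundLaneData, UnrewardedRelayer, UnrewardedRelayersState};
///
/// let lane_data = InboundLaneData {
/// relayers: vec![UnrewardedRelayer { relayer: 1u64, messages: DeliveredMessages { begin: 1, end: 3 } }]
/// .into_iter()
/// .collect(),
/// last_confirmed_nonce: 0,
/// };
/// let state = UnrewardedRelayersState::from(&lane_data);
/// assert!(state.is_valid(&lane_data));
/// assert_eq!(state.total_messages, 3);
/// ```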
pub fn is_valid<RelayerId>(&self, lane_data: &InboundLaneData<RelayerId>) -> bool {
self == &lane_data.into()
}
}
impl<RelayerId> From<&InboundLaneData<RelayerId>> for UnrewardedRelayersState {
fn from(lane: &InboundLaneData<RelayerId>) -> UnrewardedRelayersState {
UnrewardedRelayersState {
unrewarded_relayer_entries: lane.relayers.len() as _,
messages_in_oldest_entry: lane
.relayers
.front()
.map(|entry| entry.messages.total_messages())
.unwrap_or(0),
total_messages: lane.total_unrewarded_messages(),
last_delivered_nonce: lane.last_delivered_nonce(),
}
}
}
/// Outbound lane data.
#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo, MaxEncodedLen)]
pub struct OutboundLaneData {
/// Nonce of the oldest message that we haven't yet pruned. May point to a not-yet-generated
/// message if all sent messages have already been pruned.
pub oldest_unpruned_nonce: MessageNonce,
/// Nonce of the latest message, received by bridged chain.
pub latest_received_nonce: MessageNonce,
/// Nonce of the latest message, generated by us.
pub latest_generated_nonce: MessageNonce,
}
impl Default for OutboundLaneData {
fn default() -> Self {
OutboundLaneData {
// it is 1 because we're pruning everything in [oldest_unpruned_nonce;
// latest_received_nonce]
oldest_unpruned_nonce: 1,
latest_received_nonce: 0,
latest_generated_nonce: 0,
}
}
}
impl OutboundLaneData {
/// Return nonces of all currently queued messages (i.e. messages that we believe
/// are not delivered yet).
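///
/// An illustrative sketch (doc-test; the nonces are example values):
///
/// ```
/// use bp_messages::OutboundLaneData;
///
/// let lane = OutboundLaneData {
/// oldest_unpruned_nonce: 1,
/// latest_received_nonce: 5,
/// latest_generated_nonce: 8,
/// };
/// // messages 6, 7 and 8 have been generated, but their delivery is not yet confirmed
/// assert_eq!(lane.queued_messages(), 6..=8);
/// ```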
pub fn queued_messages(&self) -> RangeInclusive<MessageNonce> {
(self.latest_received_nonce + 1)..=self.latest_generated_nonce
}
}
/// Calculate, for every relayer, the number of messages within `received_range` that it has delivered.
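///
/// An illustrative sketch (doc-test; relayer ids, nonces and the received range are example
/// values): rewards are counted per relayer and clamped to the `received_range`.
///
/// ```
/// use bp_messages::{calc_relayers_rewards, DeliveredMessages, UnrewardedRelayer};
/// use std::collections::VecDeque;
///
/// let relayers: VecDeque<_> = vec![
/// UnrewardedRelayer { relayer: 1u64, messages: DeliveredMessages { begin: 1, end: 3 } },
/// UnrewardedRelayer { relayer: 2u64, messages: DeliveredMessages { begin: 4, end: 10 } },
/// ]
/// .into_iter()
/// .collect();
///
/// let rewards = calc_relayers_rewards(relayers, &(2..=5));
/// assert_eq!(rewards[&1u64], 2); // messages 2 and 3
/// assert_eq!(rewards[&2u64], 2); // messages 4 and 5
/// ```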
pub fn calc_relayers_rewards<AccountId>(
messages_relayers: VecDeque<UnrewardedRelayer<AccountId>>,
received_range: &RangeInclusive<MessageNonce>,
) -> RelayersRewards<AccountId>
where
AccountId: sp_std::cmp::Ord,
{
// remember to reward relayers that have delivered messages
// this loop is bounded by `T::MaxUnrewardedRelayerEntriesAtInboundLane` on the bridged chain
let mut relayers_rewards = RelayersRewards::new();
for entry in messages_relayers {
let nonce_begin = sp_std::cmp::max(entry.messages.begin, *received_range.start());
let nonce_end = sp_std::cmp::min(entry.messages.end, *received_range.end());
if nonce_end >= nonce_begin {
*relayers_rewards.entry(entry.relayer).or_default() += nonce_end - nonce_begin + 1;
}
}
relayers_rewards
}
/// A minimized version of `pallet-bridge-messages::Call` that can be used without a runtime.
#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
#[allow(non_camel_case_types)]
pub enum BridgeMessagesCall<AccountId, MessagesProof, MessagesDeliveryProof> {
/// `pallet-bridge-messages::Call::receive_messages_proof`
#[codec(index = 2)]
receive_messages_proof {
/// Account id of relayer at the **bridged** chain.
relayer_id_at_bridged_chain: AccountId,
/// Messages proof.
proof: MessagesProof,
/// A number of messages in the proof.
messages_count: u32,
/// Total dispatch weight of messages in the proof.
dispatch_weight: Weight,
},
/// `pallet-bridge-messages::Call::receive_messages_delivery_proof`
#[codec(index = 3)]
receive_messages_delivery_proof {
/// Messages delivery proof.
proof: MessagesDeliveryProof,
/// "Digest" of unrewarded relayers state at the bridged chain.
relayers_state: UnrewardedRelayersState,
},
}
/// Error that happens during message verification.
#[derive(Encode, Decode, RuntimeDebug, PartialEq, Eq, PalletError, TypeInfo)]
pub enum VerificationError {
/// The message proof is empty.
EmptyMessageProof,
/// Error returned by the bridged header chain.
HeaderChain(HeaderChainError),
/// Error returned while reading/decoding inbound lane data from the storage proof.
InboundLaneStorage(StorageProofError),
/// The declared message weight is incorrect.
InvalidMessageWeight,
/// Declared messages count doesn't match actual value.
MessagesCountMismatch,
/// Error returned while reading/decoding message data from the storage proof.
MessageStorage(StorageProofError),
/// The message is too large.
MessageTooLarge,
/// Error returned while reading/decoding outbound lane data from the storage proof.
OutboundLaneStorage(StorageProofError),
/// Storage proof related error.
StorageProof(StorageProofError),
/// Custom error
Other(#[codec(skip)] &'static str),
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn total_unrewarded_messages_does_not_overflow() {
let lane_data = InboundLaneData {
relayers: vec![
UnrewardedRelayer { relayer: 1, messages: DeliveredMessages::new(0) },
UnrewardedRelayer {
relayer: 2,
messages: DeliveredMessages::new(MessageNonce::MAX),
},
]
.into_iter()
.collect(),
last_confirmed_nonce: 0,
};
assert_eq!(lane_data.total_unrewarded_messages(), MessageNonce::MAX);
}
#[test]
fn inbound_lane_data_returns_correct_hint() {
let test_cases = vec![
// single relayer, multiple messages
(1, 128u8),
// multiple relayers, single message per relayer
(128u8, 128u8),
// several messages per relayer
(13u8, 128u8),
];
for (relayer_entries, messages_count) in test_cases {
let expected_size = InboundLaneData::<u8>::encoded_size_hint(relayer_entries as _);
let actual_size = InboundLaneData {
relayers: (1u8..=relayer_entries)
.map(|i| UnrewardedRelayer {
relayer: i,
messages: DeliveredMessages::new(i as _),
})
.collect(),
last_confirmed_nonce: messages_count as _,
}
.encode()
.len();
let difference = (expected_size.unwrap() as f64 - actual_size as f64).abs();
assert!(
difference / (std::cmp::min(actual_size, expected_size.unwrap()) as f64) < 0.1,
"Too large difference between actual ({actual_size}) and expected ({expected_size:?}) inbound lane data size. Test case: {relayer_entries}+{messages_count}",
);
}
}
#[test]
fn contains_result_works() {
let delivered_messages = DeliveredMessages { begin: 100, end: 150 };
assert!(!delivered_messages.contains_message(99));
assert!(delivered_messages.contains_message(100));
assert!(delivered_messages.contains_message(150));
assert!(!delivered_messages.contains_message(151));
}
#[test]
fn lane_id_debug_format_matches_inner_array_format() {
assert_eq!(format!("{:?}", LaneId([0, 0, 0, 0])), format!("{:?}", [0, 0, 0, 0]),);
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Primitives of the messages module that are used on the source chain.
use crate::{InboundLaneData, LaneId, MessageNonce, VerificationError};
use crate::UnrewardedRelayer;
use bp_runtime::Size;
use frame_support::Parameter;
use sp_core::RuntimeDebug;
use sp_std::{
collections::{btree_map::BTreeMap, vec_deque::VecDeque},
fmt::Debug,
ops::RangeInclusive,
};
/// Number of messages, delivered by relayers.
pub type RelayersRewards<AccountId> = BTreeMap<AccountId, MessageNonce>;
/// Target chain API. Used by source chain to verify target chain proofs.
///
/// All implementations of this trait should only work with finalized data that
/// can't change. A wrong implementation may lead to invalid lane states (i.e. a lane
/// that is stuck) and/or to processing messages without paying fees.
///
/// The `Payload` type here means the payload of the message that is sent from the
/// source chain to the target chain. The `AccountId` type here means the account
/// type used by the source chain.
pub trait TargetHeaderChain<Payload, AccountId> {
/// Proof that messages have been received by target chain.
type MessagesDeliveryProof: Parameter + Size;
/// Verify message payload before we accept it.
///
/// **CAUTION**: this is a very important function. An incorrect implementation may lead
/// to stuck lanes and/or relayer losses.
///
/// A proper implementation must ensure that the delivery transaction with this
/// payload would (at least) be accepted into the target chain transaction pool AND
/// eventually be successfully mined. The most obvious example of an incorrect
/// implementation would be one for a BTC chain that accepts payloads larger than 1MB:
/// BTC nodes don't accept transactions larger than 1MB, so the relayer would be unable
/// to craft a valid transaction, meaning this (and all subsequent) messages would
/// never be delivered.
fn verify_message(payload: &Payload) -> Result<(), VerificationError>;
/// Verify the messages delivery proof and return the lane together with the `InboundLaneData`
/// for that lane (which includes the nonce of the latest received message).
fn verify_messages_delivery_proof(
proof: Self::MessagesDeliveryProof,
) -> Result<(LaneId, InboundLaneData<AccountId>), VerificationError>;
}
/// Manages payments that are happening at the source chain during delivery confirmation
/// transaction.
pub trait DeliveryConfirmationPayments<AccountId> {
/// Error type.
type Error: Debug + Into<&'static str>;
/// Pay rewards for delivering messages to the given relayers.
///
/// The implementation may also choose to pay reward to the `confirmation_relayer`, which is
/// a relayer that has submitted delivery confirmation transaction.
///
/// Returns number of actually rewarded relayers.
fn pay_reward(
lane_id: LaneId,
messages_relayers: VecDeque<UnrewardedRelayer<AccountId>>,
confirmation_relayer: &AccountId,
received_range: &RangeInclusive<MessageNonce>,
) -> MessageNonce;
}
impl<AccountId> DeliveryConfirmationPayments<AccountId> for () {
type Error = &'static str;
fn pay_reward(
_lane_id: LaneId,
_messages_relayers: VecDeque<UnrewardedRelayer<AccountId>>,
_confirmation_relayer: &AccountId,
_received_range: &RangeInclusive<MessageNonce>,
) -> MessageNonce {
// this implementation is not rewarding relayers at all
0
}
}
/// Callback that is called at the source chain (bridge hub) when we get delivery confirmation
/// for new messages.
pub trait OnMessagesDelivered {
/// New messages delivery has been confirmed.
///
/// The `enqueued_messages` argument is the number of messages at the given `lane` that are still waiting to be delivered.
fn on_messages_delivered(lane: LaneId, enqueued_messages: MessageNonce);
}
impl OnMessagesDelivered for () {
fn on_messages_delivered(_lane: LaneId, _enqueued_messages: MessageNonce) {}
}
/// Send message artifacts.
#[derive(Eq, RuntimeDebug, PartialEq)]
pub struct SendMessageArtifacts {
/// Nonce of the message.
pub nonce: MessageNonce,
/// Number of enqueued messages at the lane, after the message is sent.
pub enqueued_messages: MessageNonce,
}
/// Messages bridge API to be used from other pallets.
pub trait MessagesBridge<Payload> {
/// Error type.
type Error: Debug;
/// Intermediary structure returned by `validate_message()`.
///
/// It can then be passed to `send_message()` in order to actually send the message
/// on the bridge.
type SendMessageArgs;
/// Check if the message can be sent over the bridge.
fn validate_message(
lane: LaneId,
message: &Payload,
) -> Result<Self::SendMessageArgs, Self::Error>;
/// Send message over the bridge.
///
/// Returns unique message nonce or error if send has failed.
fn send_message(message: Self::SendMessageArgs) -> SendMessageArtifacts;
}
/// Structure that may be used in place of `TargetHeaderChain` and
/// `DeliveryConfirmationPayments` on chains where outbound messages are forbidden.
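///
/// An illustrative sketch (doc-test; the `()` payload and `u64` account id are arbitrary
/// placeholder types): every outbound message is rejected.
///
/// ```
/// use bp_messages::source_chain::{ForbidOutboundMessages, TargetHeaderChain};
///
/// let result = <ForbidOutboundMessages as TargetHeaderChain<(), u64>>::verify_message(&());
/// assert!(result.is_err());
/// ```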
pub struct ForbidOutboundMessages;
/// Error message that is used in `ForbidOutboundMessages` implementation.
const ALL_OUTBOUND_MESSAGES_REJECTED: &str =
"This chain is configured to reject all outbound messages";
impl<Payload, AccountId> TargetHeaderChain<Payload, AccountId> for ForbidOutboundMessages {
type MessagesDeliveryProof = ();
fn verify_message(_payload: &Payload) -> Result<(), VerificationError> {
Err(VerificationError::Other(ALL_OUTBOUND_MESSAGES_REJECTED))
}
fn verify_messages_delivery_proof(
_proof: Self::MessagesDeliveryProof,
) -> Result<(LaneId, InboundLaneData<AccountId>), VerificationError> {
Err(VerificationError::Other(ALL_OUTBOUND_MESSAGES_REJECTED))
}
}
impl<AccountId> DeliveryConfirmationPayments<AccountId> for ForbidOutboundMessages {
type Error = &'static str;
fn pay_reward(
_lane_id: LaneId,
_messages_relayers: VecDeque<UnrewardedRelayer<AccountId>>,
_confirmation_relayer: &AccountId,
_received_range: &RangeInclusive<MessageNonce>,
) -> MessageNonce {
0
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Storage keys of bridge messages pallet.
/// Name of the `PalletOperatingMode` storage value.
pub const OPERATING_MODE_VALUE_NAME: &str = "PalletOperatingMode";
/// Name of the `OutboundMessages` storage map.
pub const OUTBOUND_MESSAGES_MAP_NAME: &str = "OutboundMessages";
/// Name of the `OutboundLanes` storage map.
pub const OUTBOUND_LANES_MAP_NAME: &str = "OutboundLanes";
/// Name of the `InboundLanes` storage map.
pub const INBOUND_LANES_MAP_NAME: &str = "InboundLanes";
use crate::{LaneId, MessageKey, MessageNonce};
use codec::Encode;
use frame_support::Blake2_128Concat;
use sp_core::storage::StorageKey;
/// Storage key of the `PalletOperatingMode` value in the runtime storage.
pub fn operating_mode_key(pallet_prefix: &str) -> StorageKey {
StorageKey(
bp_runtime::storage_value_final_key(
pallet_prefix.as_bytes(),
OPERATING_MODE_VALUE_NAME.as_bytes(),
)
.to_vec(),
)
}
/// Storage key of the outbound message in the runtime storage.
pub fn message_key(pallet_prefix: &str, lane: &LaneId, nonce: MessageNonce) -> StorageKey {
bp_runtime::storage_map_final_key::<Blake2_128Concat>(
pallet_prefix,
OUTBOUND_MESSAGES_MAP_NAME,
&MessageKey { lane_id: *lane, nonce }.encode(),
)
}
/// Storage key of the outbound message lane state in the runtime storage.
pub fn outbound_lane_data_key(pallet_prefix: &str, lane: &LaneId) -> StorageKey {
bp_runtime::storage_map_final_key::<Blake2_128Concat>(
pallet_prefix,
OUTBOUND_LANES_MAP_NAME,
&lane.encode(),
)
}
/// Storage key of the inbound message lane state in the runtime storage.
pub fn inbound_lane_data_key(pallet_prefix: &str, lane: &LaneId) -> StorageKey {
bp_runtime::storage_map_final_key::<Blake2_128Concat>(
pallet_prefix,
INBOUND_LANES_MAP_NAME,
&lane.encode(),
)
}
#[cfg(test)]
mod tests {
use super::*;
use hex_literal::hex;
#[test]
fn operating_mode_key_computed_properly() {
// If this test fails, then something has been changed in module storage that is possibly
// breaking all existing message relays.
let storage_key = operating_mode_key("BridgeMessages").0;
assert_eq!(
storage_key,
hex!("dd16c784ebd3390a9bc0357c7511ed010f4cf0917788d791142ff6c1f216e7b3").to_vec(),
"Unexpected storage key: {}",
hex::encode(&storage_key),
);
}
#[test]
fn storage_message_key_computed_properly() {
// If this test fails, then something has been changed in module storage that is breaking
// all previously crafted messages proofs.
let storage_key = message_key("BridgeMessages", &LaneId(*b"test"), 42).0;
assert_eq!(
storage_key,
hex!("dd16c784ebd3390a9bc0357c7511ed018a395e6242c6813b196ca31ed0547ea79446af0e09063bd4a7874aef8a997cec746573742a00000000000000").to_vec(),
"Unexpected storage key: {}",
hex::encode(&storage_key),
);
}
#[test]
fn outbound_lane_data_key_computed_properly() {
// If this test fails, then something has been changed in module storage that is breaking
// all previously crafted outbound lane state proofs.
let storage_key = outbound_lane_data_key("BridgeMessages", &LaneId(*b"test")).0;
assert_eq!(
storage_key,
hex!("dd16c784ebd3390a9bc0357c7511ed0196c246acb9b55077390e3ca723a0ca1f44a8995dd50b6657a037a7839304535b74657374").to_vec(),
"Unexpected storage key: {}",
hex::encode(&storage_key),
);
}
#[test]
fn inbound_lane_data_key_computed_properly() {
// If this test fails, then something has been changed in module storage that is breaking
// all previously crafted inbound lane state proofs.
let storage_key = inbound_lane_data_key("BridgeMessages", &LaneId(*b"test")).0;
assert_eq!(
storage_key,
hex!("dd16c784ebd3390a9bc0357c7511ed01e5f83cf83f2127eb47afdc35d6e43fab44a8995dd50b6657a037a7839304535b74657374").to_vec(),
"Unexpected storage key: {}",
hex::encode(&storage_key),
);
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Primitives of the messages module that are used on the target chain.
use crate::{
LaneId, Message, MessageKey, MessageNonce, MessagePayload, OutboundLaneData, VerificationError,
};
use bp_runtime::{messages::MessageDispatchResult, Size};
use codec::{Decode, Encode, Error as CodecError};
use frame_support::{weights::Weight, Parameter};
use scale_info::TypeInfo;
use sp_core::RuntimeDebug;
use sp_std::{collections::btree_map::BTreeMap, fmt::Debug, marker::PhantomData, prelude::*};
/// Proved messages from the source chain.
pub type ProvedMessages<Message> = BTreeMap<LaneId, ProvedLaneMessages<Message>>;
/// Proved messages from single lane of the source chain.
#[derive(RuntimeDebug, Encode, Decode, Clone, PartialEq, Eq, TypeInfo)]
pub struct ProvedLaneMessages<Message> {
/// Optional outbound lane state.
pub lane_state: Option<OutboundLaneData>,
/// Messages sent through this lane.
pub messages: Vec<Message>,
}
/// Message data with decoded dispatch payload.
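///
/// An illustrative sketch (doc-test; `u32` stands in for a real dispatch payload type): the
/// opaque payload is decoded when it is converted into `DispatchMessageData`.
///
/// ```
/// use bp_messages::target_chain::DispatchMessageData;
/// use codec::Encode;
///
/// let payload: bp_messages::MessagePayload = 42u32.encode();
/// let data: DispatchMessageData<u32> = payload.into();
/// assert_eq!(data.payload.ok(), Some(42));
/// ```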
#[derive(RuntimeDebug)]
pub struct DispatchMessageData<DispatchPayload> {
/// Result of dispatch payload decoding.
pub payload: Result<DispatchPayload, CodecError>,
}
/// Message with decoded dispatch payload.
#[derive(RuntimeDebug)]
pub struct DispatchMessage<DispatchPayload> {
/// Message key.
pub key: MessageKey,
/// Message data with decoded dispatch payload.
pub data: DispatchMessageData<DispatchPayload>,
}
/// Source chain API. Used by the target chain to verify source chain proofs.
///
/// All implementations of this trait should only work with finalized data that
/// can't change. A wrong implementation may lead to invalid lane states (i.e. a lane
/// that is stuck) and/or to processing messages without paying fees.
pub trait SourceHeaderChain {
/// Proof that messages are sent from source chain. This may also include proof
/// of corresponding outbound lane states.
type MessagesProof: Parameter + Size;
/// Verify messages proof and return proved messages.
///
/// Returns error if either proof is incorrect, or the number of messages in the proof
/// is not matching the `messages_count`.
///
/// Messages vector is required to be sorted by nonce within each lane. Out-of-order
/// messages will be rejected.
///
/// The `messages_count` argument verification (sane limits) is supposed to be made
/// outside this function. This function only verifies that the proof declares exactly
/// `messages_count` messages.
fn verify_messages_proof(
proof: Self::MessagesProof,
messages_count: u32,
) -> Result<ProvedMessages<Message>, VerificationError>;
}
/// Called when inbound message is received.
pub trait MessageDispatch {
/// Decoded message payload type. A valid message may contain an invalid payload. In this
/// case the message is delivered, but dispatch fails. That's why there are two separate
/// payload types: the opaque `MessagePayload` used in delivery and this `DispatchPayload`
/// used in dispatch.
type DispatchPayload: Decode;
/// Fine-grained result of single message dispatch (for better diagnostic purposes)
type DispatchLevelResult: Clone + sp_std::fmt::Debug + Eq;
/// Returns `true` if the dispatcher is ready to accept additional messages. A `false` return
/// value should be treated as a hint by both the dispatcher and its consumers - i.e. the
/// dispatcher shall not simply drop messages if it returns `false`, and the consumer may still
/// call `dispatch` even if the dispatcher has returned `false`.
///
/// We check it in the messages delivery transaction prologue, so if it becomes `false` after
/// some portion of messages has already been dispatched, it doesn't fail the whole transaction.
fn is_active() -> bool;
/// Estimate dispatch weight.
///
/// This function must return correct upper bound of dispatch weight. The return value
/// of this function is expected to match return value of the corresponding
/// `From<Chain>InboundLaneApi::message_details().dispatch_weight` call.
fn dispatch_weight(message: &mut DispatchMessage<Self::DispatchPayload>) -> Weight;
/// Called when inbound message is received.
///
/// It is up to the implementers of this trait to determine whether the message
/// is invalid (i.e. improperly encoded, has too large weight, ...) or not.
fn dispatch(
message: DispatchMessage<Self::DispatchPayload>,
) -> MessageDispatchResult<Self::DispatchLevelResult>;
}
/// Manages payments that are happening at the target chain during message delivery transaction.
pub trait DeliveryPayments<AccountId> {
/// Error type.
type Error: Debug + Into<&'static str>;
/// Pay rewards for delivering messages to the given relayer.
///
/// This method is called during message delivery transaction which has been submitted
/// by the `relayer`. The transaction brings `total_messages` messages but only
/// `valid_messages` have been accepted. The post-dispatch transaction weight is the
/// `actual_weight`.
fn pay_reward(
relayer: AccountId,
total_messages: MessageNonce,
valid_messages: MessageNonce,
actual_weight: Weight,
);
}
impl<Message> Default for ProvedLaneMessages<Message> {
fn default() -> Self {
ProvedLaneMessages { lane_state: None, messages: Vec::new() }
}
}
impl<DispatchPayload: Decode> From<Message> for DispatchMessage<DispatchPayload> {
fn from(message: Message) -> Self {
DispatchMessage { key: message.key, data: message.payload.into() }
}
}
impl<DispatchPayload: Decode> From<MessagePayload> for DispatchMessageData<DispatchPayload> {
fn from(payload: MessagePayload) -> Self {
DispatchMessageData { payload: DispatchPayload::decode(&mut &payload[..]) }
}
}
impl<AccountId> DeliveryPayments<AccountId> for () {
type Error = &'static str;
fn pay_reward(
_relayer: AccountId,
_total_messages: MessageNonce,
_valid_messages: MessageNonce,
_actual_weight: Weight,
) {
// this implementation is not rewarding relayer at all
}
}
/// Structure that may be used in place of `SourceHeaderChain` and `MessageDispatch` on chains
/// where inbound messages are forbidden.
pub struct ForbidInboundMessages<MessagesProof, DispatchPayload>(
PhantomData<(MessagesProof, DispatchPayload)>,
);
/// Error message that is used in `ForbidInboundMessages` implementation.
const ALL_INBOUND_MESSAGES_REJECTED: &str =
"This chain is configured to reject all inbound messages";
impl<MessagesProof: Parameter + Size, DispatchPayload> SourceHeaderChain
for ForbidInboundMessages<MessagesProof, DispatchPayload>
{
type MessagesProof = MessagesProof;
fn verify_messages_proof(
_proof: Self::MessagesProof,
_messages_count: u32,
) -> Result<ProvedMessages<Message>, VerificationError> {
Err(VerificationError::Other(ALL_INBOUND_MESSAGES_REJECTED))
}
}
impl<MessagesProof, DispatchPayload: Decode> MessageDispatch
for ForbidInboundMessages<MessagesProof, DispatchPayload>
{
type DispatchPayload = DispatchPayload;
type DispatchLevelResult = ();
fn is_active() -> bool {
false
}
fn dispatch_weight(_message: &mut DispatchMessage<Self::DispatchPayload>) -> Weight {
Weight::MAX
}
fn dispatch(
_: DispatchMessage<Self::DispatchPayload>,
) -> MessageDispatchResult<Self::DispatchLevelResult> {
MessageDispatchResult { unspent_weight: Weight::zero(), dispatch_level_result: () }
}
}
[package]
name = "bp-parachains"
description = "Primitives of parachains module."
version = "0.7.0"
authors.workspace = true
edition.workspace = true
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
[lints]
workspace = true
[dependencies]
codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] }
impl-trait-for-tuples = "0.2"
scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
# Bridge dependencies
bp-header-chain = { path = "../header-chain", default-features = false }
bp-polkadot-core = { path = "../polkadot-core", default-features = false }
bp-runtime = { path = "../runtime", default-features = false }
# Substrate dependencies
frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
[features]
default = ["std"]
std = [
"bp-header-chain/std",
"bp-polkadot-core/std",
"bp-runtime/std",
"codec/std",
"frame-support/std",
"scale-info/std",
"sp-core/std",
"sp-runtime/std",
"sp-std/std",
]
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Primitives of parachains module.
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
pub use bp_header_chain::StoredHeaderData;
use bp_polkadot_core::{
parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId},
BlockNumber as RelayBlockNumber, Hash as RelayBlockHash,
};
use bp_runtime::{
BlockNumberOf, Chain, HashOf, HeaderOf, Parachain, StorageDoubleMapKeyProvider,
StorageMapKeyProvider,
};
use codec::{Decode, Encode, MaxEncodedLen};
use frame_support::{Blake2_128Concat, Twox64Concat};
use scale_info::TypeInfo;
use sp_core::storage::StorageKey;
use sp_runtime::{traits::Header as HeaderT, RuntimeDebug};
use sp_std::{marker::PhantomData, prelude::*};
/// Best known parachain head hash.
#[derive(Clone, Decode, Encode, MaxEncodedLen, PartialEq, RuntimeDebug, TypeInfo)]
pub struct BestParaHeadHash {
/// Number of relay block where this head has been read.
///
/// The parachain head is opaque to the relay chain, so we can't simply decode it as a
/// parachain header and call `block_number()` on it. Instead, we rely on the fact that a
/// parachain head is always built on top of the previous head (because it is a blockchain)
/// and that the relay chain always imports parachain heads in order. What this means for us
/// is that at any given **finalized** relay block `B`, the parachain head will be an ancestor
/// of (or the same as) all parachain heads available at descendants of `B`.
pub at_relay_block_number: RelayBlockNumber,
/// Hash of parachain head.
pub head_hash: ParaHash,
}
/// Best known parachain head as it is stored in the runtime storage.
#[derive(Decode, Encode, MaxEncodedLen, PartialEq, RuntimeDebug, TypeInfo)]
pub struct ParaInfo {
/// Best known parachain head hash.
pub best_head_hash: BestParaHeadHash,
/// Current ring buffer position for this parachain.
pub next_imported_hash_position: u32,
}
/// Returns runtime storage key of given parachain head at the source chain.
///
/// The head is stored by the `paras` pallet in the `Heads` map.
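///
/// An illustrative sketch (doc-test; the pallet name and parachain id are example values):
///
/// ```
/// use bp_parachains::parachain_head_storage_key_at_source;
/// use bp_polkadot_core::parachains::ParaId;
///
/// let key = parachain_head_storage_key_at_source("Paras", ParaId(1_000));
/// assert!(!key.0.is_empty());
/// ```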
pub fn parachain_head_storage_key_at_source(
paras_pallet_name: &str,
para_id: ParaId,
) -> StorageKey {
bp_runtime::storage_map_final_key::<Twox64Concat>(paras_pallet_name, "Heads", &para_id.encode())
}
/// Can be used to access the runtime storage key of the parachains info at the target chain.
///
/// The info is stored by the `pallet-bridge-parachains` pallet in the `ParasInfo` map.
pub struct ParasInfoKeyProvider;
impl StorageMapKeyProvider for ParasInfoKeyProvider {
const MAP_NAME: &'static str = "ParasInfo";
type Hasher = Blake2_128Concat;
type Key = ParaId;
type Value = ParaInfo;
}
/// Can be used to access the runtime storage key of the parachain head at the target chain.
///
/// The head is stored by the `pallet-bridge-parachains` pallet in the `ImportedParaHeads` map.
pub struct ImportedParaHeadsKeyProvider;
impl StorageDoubleMapKeyProvider for ImportedParaHeadsKeyProvider {
const MAP_NAME: &'static str = "ImportedParaHeads";
type Hasher1 = Blake2_128Concat;
type Key1 = ParaId;
type Hasher2 = Blake2_128Concat;
type Key2 = ParaHash;
type Value = ParaStoredHeaderData;
}
/// Stored data of the parachain head. It is an encoded version of the
/// `bp_runtime::StoredHeaderData` structure.
///
/// We do not know the exact structure of the parachain head, so we always store an encoded
/// version of `bp_runtime::StoredHeaderData`. It is only decoded when we talk about a specific
/// parachain.
#[derive(Clone, Decode, Encode, PartialEq, RuntimeDebug, TypeInfo)]
pub struct ParaStoredHeaderData(pub Vec<u8>);
impl ParaStoredHeaderData {
/// Decode stored parachain head data.
pub fn decode_parachain_head_data<C: Chain>(
&self,
) -> Result<StoredHeaderData<BlockNumberOf<C>, HashOf<C>>, codec::Error> {
StoredHeaderData::<BlockNumberOf<C>, HashOf<C>>::decode(&mut &self.0[..])
}
}
/// Stored parachain head data builder.
pub trait ParaStoredHeaderDataBuilder {
/// Maximal parachain head size that we may accept for free. All heads above
/// this limit are submitted for a regular fee.
fn max_free_head_size() -> u32;
/// Return number of parachains that are supported by this builder.
fn supported_parachains() -> u32;
/// Try to build head data from encoded head of parachain with given id.
fn try_build(para_id: ParaId, para_head: &ParaHead) -> Option<ParaStoredHeaderData>;
}
/// Helper for using single parachain as `ParaStoredHeaderDataBuilder`.
pub struct SingleParaStoredHeaderDataBuilder<C: Parachain>(PhantomData<C>);
impl<C: Parachain> ParaStoredHeaderDataBuilder for SingleParaStoredHeaderDataBuilder<C> {
fn max_free_head_size() -> u32 {
C::MAX_HEADER_SIZE
}
fn supported_parachains() -> u32 {
1
}
fn try_build(para_id: ParaId, para_head: &ParaHead) -> Option<ParaStoredHeaderData> {
if para_id == ParaId(C::PARACHAIN_ID) {
let header = HeaderOf::<C>::decode(&mut &para_head.0[..]).ok()?;
return Some(ParaStoredHeaderData(
StoredHeaderData { number: *header.number(), state_root: *header.state_root() }
.encode(),
))
}
None
}
}
// Tries to build header data from each tuple member, short-circuiting on first successful one.
#[impl_trait_for_tuples::impl_for_tuples(1, 30)]
#[tuple_types_custom_trait_bound(Parachain)]
impl ParaStoredHeaderDataBuilder for C {
fn max_free_head_size() -> u32 {
let mut result = 0_u32;
for_tuples!( #(
result = sp_std::cmp::max(
result,
SingleParaStoredHeaderDataBuilder::<C>::max_free_head_size(),
);
)* );
result
}
fn supported_parachains() -> u32 {
let mut result = 0;
for_tuples!( #(
result += SingleParaStoredHeaderDataBuilder::<C>::supported_parachains();
)* );
result
}
fn try_build(para_id: ParaId, para_head: &ParaHead) -> Option<ParaStoredHeaderData> {
for_tuples!( #(
let maybe_para_head = SingleParaStoredHeaderDataBuilder::<C>::try_build(para_id, para_head);
if let Some(maybe_para_head) = maybe_para_head {
return Some(maybe_para_head);
}
)* );
None
}
}
/// A minimized version of `pallet-bridge-parachains::Call` that can be used without a runtime.
#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
#[allow(non_camel_case_types)]
pub enum BridgeParachainCall {
/// `pallet-bridge-parachains::Call::submit_parachain_heads`
#[codec(index = 0)]
submit_parachain_heads {
/// Relay chain block, for which we have submitted the `parachain_heads_proof`.
at_relay_block: (RelayBlockNumber, RelayBlockHash),
/// Parachain identifiers and their head hashes.
parachains: Vec<(ParaId, ParaHash)>,
/// Parachain heads proof.
parachain_heads_proof: ParaHeadsProof,
},
}
[package]
name = "bp-polkadot-core"
description = "Primitives of Polkadot-like runtime."
version = "0.7.0"
authors.workspace = true
edition.workspace = true
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
[lints]
workspace = true
[dependencies]
codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] }
parity-util-mem = { version = "0.12.0", optional = true }
scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
serde = { default-features = false, features = ["derive"], optional = true, workspace = true }
# Bridge Dependencies
bp-messages = { path = "../messages", default-features = false }
bp-runtime = { path = "../runtime", default-features = false }
# Substrate Based Dependencies
frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
[dev-dependencies]
hex = "0.4"
[features]
default = ["std"]
std = [
"bp-messages/std",
"bp-runtime/std",
"codec/std",
"frame-support/std",
"frame-system/std",
"parity-util-mem",
"scale-info/std",
"serde",
"sp-core/std",
"sp-runtime/std",
"sp-std/std",
]
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Primitives of the Polkadot-like chains.
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
use bp_messages::MessageNonce;
use bp_runtime::{
self,
extensions::{
ChargeTransactionPayment, CheckEra, CheckGenesis, CheckNonZeroSender, CheckNonce,
CheckSpecVersion, CheckTxVersion, CheckWeight, GenericTransactionExtension,
TransactionExtensionSchema,
},
EncodedOrDecodedCall, StorageMapKeyProvider, TransactionEra,
};
use frame_support::{
dispatch::DispatchClass,
parameter_types,
weights::{
constants::{BlockExecutionWeight, WEIGHT_REF_TIME_PER_SECOND},
Weight,
},
Blake2_128Concat,
};
use frame_system::limits;
use sp_core::{storage::StorageKey, Hasher as HasherT};
use sp_runtime::{
generic,
traits::{BlakeTwo256, IdentifyAccount, Verify},
MultiAddress, MultiSignature, OpaqueExtrinsic,
};
use sp_std::prelude::Vec;
// Re-exports to avoid extra substrate dependencies in chain-specific crates.
pub use frame_support::{weights::constants::ExtrinsicBaseWeight, Parameter};
pub use sp_runtime::{traits::Convert, Perbill};
pub mod parachains;
/// Maximal number of GRANDPA authorities at Polkadot-like chains.
///
/// Ideally, we would set it to the value of the `MaxAuthorities` constant from the bridged
/// runtime configurations. But right now it is set to `100_000` there, which makes the PoV size
/// for our bridge hub parachains huge. So let's stick to a real-world value here.
///
/// Right now both Kusama and Polkadot aim to have around 1000 validators. Let's be safe and
/// take a bit more than that.
pub const MAX_AUTHORITIES_COUNT: u32 = 1_256;
/// Reasonable number of headers in the `votes_ancestries` on Polkadot-like chains.
///
/// See [`bp-header-chain::ChainWithGrandpa`] for more details.
///
/// This value comes from recent (December 2023) Kusama and Polkadot headers. There are no
/// justifications with any additional headers in the votes ancestry, so this could reasonably
/// be set to zero. But we assume that there may be small GRANDPA lags, so we're leaving some
/// reserve here.
pub const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 2;
/// Average header size in `votes_ancestries` field of justification on Polkadot-like
/// chains.
///
/// See [`bp-header-chain::ChainWithGrandpa`] for more details.
///
/// This value comes from recent (December, 2023) Kusama headers. Most of headers are `327` bytes
/// there, but let's have some reserve and make it 1024.
pub const AVERAGE_HEADER_SIZE: u32 = 1024;
/// Approximate maximal header size on Polkadot-like chains.
///
/// See [`bp-header-chain::ChainWithGrandpa`] for more details.
///
/// This value comes from recent (December, 2023) Kusama headers. Maximal header is a mandatory
/// header. In its SCALE-encoded form it is `113407` bytes. Let's have some reserve here.
pub const MAX_MANDATORY_HEADER_SIZE: u32 = 120 * 1024;
/// Number of extra bytes (excluding the size of the storage value itself) of a storage proof
/// built at a Polkadot-like chain. This mostly depends on the number of entries in the storage
/// trie. Some reserve is added to account for future chain growth.
///
/// To compute this value, we've synced Kusama chain blocks [0; 6545733] to see if there were
/// any significant changes of the storage proof size (NO):
///
/// - at block 3072 the storage proof size overhead was 579 bytes;
/// - at block 2479616 it was 578 bytes;
/// - at block 4118528 it was 711 bytes;
/// - at block 6540800 it was 779 bytes.
///
/// The number of storage entries at the block 6546170 was 351207 and number of trie nodes in
/// the storage proof was 5 (log(16, 351207) ~ 4.6).
///
/// So the assumption is that the storage proof size overhead won't be larger than 1024 in the
/// near future. If it ever breaks this barrier, then we'll need to update this constant
/// at the next runtime upgrade.
pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024;
/// All Polkadot-like chains allow normal extrinsics to fill block up to 75 percent.
///
/// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate.
const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);
/// All Polkadot-like chains allow 2 seconds of compute with a 6-second average block time.
///
/// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate.
pub const MAXIMUM_BLOCK_WEIGHT: Weight =
Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), u64::MAX);
/// All Polkadot-like chains assume that an on-initialize consumes 1 percent of the weight on
/// average, hence a single extrinsic will not be allowed to consume more than
/// `AvailableBlockRatio - 1 percent`.
///
/// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate.
pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(1);
parameter_types! {
/// All Polkadot-like chains have maximal block size set to 5MB.
///
/// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate.
pub BlockLength: limits::BlockLength = limits::BlockLength::max_with_normal_ratio(
5 * 1024 * 1024,
NORMAL_DISPATCH_RATIO,
);
/// All Polkadot-like chains have the same block weights.
///
/// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate.
pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder()
.base_block(BlockExecutionWeight::get())
.for_class(DispatchClass::all(), |weights| {
weights.base_extrinsic = ExtrinsicBaseWeight::get();
})
.for_class(DispatchClass::Normal, |weights| {
weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT);
})
.for_class(DispatchClass::Operational, |weights| {
weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT);
// Operational transactions have an extra reserved space, so that they
// are included even if block reached `MAXIMUM_BLOCK_WEIGHT`.
weights.reserved = Some(
MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT,
);
})
.avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO)
.build_or_panic();
}
// TODO [#78] may need to be updated after https://github.com/paritytech/parity-bridges-common/issues/78
/// Maximal number of messages in single delivery transaction.
pub const MAX_MESSAGES_IN_DELIVERY_TRANSACTION: MessageNonce = 128;
/// Maximal number of bytes, included in the signed Polkadot-like transaction apart from the encoded
/// call itself.
///
/// Can be computed by subtracting encoded call size from raw transaction size.
pub const TX_EXTRA_BYTES: u32 = 256;
/// Re-export `time_units` to make usage easier.
pub use time_units::*;
/// Human readable time units defined in terms of number of blocks.
pub mod time_units {
use super::BlockNumber;
/// Milliseconds between Polkadot-like chain blocks.
pub const MILLISECS_PER_BLOCK: u64 = 6000;
/// Slot duration in Polkadot-like chain consensus algorithms.
pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK;
/// A minute, expressed in Polkadot-like chain blocks.
pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber);
/// An hour, expressed in Polkadot-like chain blocks.
pub const HOURS: BlockNumber = MINUTES * 60;
/// A day, expressed in Polkadot-like chain blocks.
pub const DAYS: BlockNumber = HOURS * 24;
}
/// Block number type used in Polkadot-like chains.
pub type BlockNumber = u32;
/// Hash type used in Polkadot-like chains.
pub type Hash = <BlakeTwo256 as HasherT>::Out;
/// Hashing type.
pub type Hashing = BlakeTwo256;
/// The type of object that can produce hashes on Polkadot-like chains.
pub type Hasher = BlakeTwo256;
/// The header type used by Polkadot-like chains.
pub type Header = generic::Header<BlockNumber, Hasher>;
/// Signature type used by Polkadot-like chains.
pub type Signature = MultiSignature;
/// Public key of account on Polkadot-like chains.
pub type AccountPublic = <Signature as Verify>::Signer;
/// Id of account on Polkadot-like chains.
pub type AccountId = <AccountPublic as IdentifyAccount>::AccountId;
/// Address of account on Polkadot-like chains.
pub type AccountAddress = MultiAddress<AccountId, ()>;
/// Nonce of a transaction on the Polkadot-like chains.
pub type Nonce = u32;
/// Block type of Polkadot-like chains.
pub type Block = generic::Block<Header, OpaqueExtrinsic>;
/// Polkadot-like block signed with a Justification.
pub type SignedBlock = generic::SignedBlock<Block>;
/// The balance of an account on Polkadot-like chain.
pub type Balance = u128;
/// Unchecked Extrinsic type.
pub type UncheckedExtrinsic<Call, TransactionExt> = generic::UncheckedExtrinsic<
AccountAddress,
EncodedOrDecodedCall<Call>,
Signature,
TransactionExt,
>;
/// Account address, used by the Polkadot-like chain.
pub type Address = MultiAddress<AccountId, ()>;
/// Returns maximal extrinsic size on all Polkadot-like chains.
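///
/// An illustrative sketch (doc-test): the value is the `Normal` dispatch class limit, so it is
/// never larger than the 5 MiB maximal block length.
///
/// ```
/// let max = bp_polkadot_core::max_extrinsic_size();
/// assert!(max > 0 && max <= 5 * 1024 * 1024);
/// ```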
pub fn max_extrinsic_size() -> u32 {
*BlockLength::get().max.get(DispatchClass::Normal)
}
/// Returns maximal extrinsic weight on all Polkadot-like chains.
pub fn max_extrinsic_weight() -> Weight {
BlockWeights::get()
.get(DispatchClass::Normal)
.max_extrinsic
.unwrap_or(Weight::MAX)
}
/// Provides a storage key for account data.
///
/// We need to use this approach when we don't have access to the runtime.
/// The equivalent code to invoke when the full `Runtime` is known is:
/// `let key = frame_system::Account::<Runtime>::storage_map_final_key(&account_id);`
pub struct AccountInfoStorageMapKeyProvider;
impl StorageMapKeyProvider for AccountInfoStorageMapKeyProvider {
const MAP_NAME: &'static str = "Account";
type Hasher = Blake2_128Concat;
type Key = AccountId;
// This should actually be `AccountInfo`, but we don't use this type to decode the data,
// so we use `Vec<u8>` as if we were working with the raw encoded data.
type Value = Vec<u8>;
}
impl AccountInfoStorageMapKeyProvider {
/// Name of the system pallet.
const PALLET_NAME: &'static str = "System";
/// Return storage key for given account data.
pub fn final_key(id: &AccountId) -> StorageKey {
<Self as StorageMapKeyProvider>::final_key(Self::PALLET_NAME, id)
}
}
/// Extra signed extension data that is used by most chains.
pub type CommonTransactionExtra = (
CheckNonZeroSender,
CheckSpecVersion,
CheckTxVersion,
CheckGenesis<Hash>,
CheckEra<Hash>,
CheckNonce<Nonce>,
CheckWeight,
ChargeTransactionPayment<Balance>,
);
/// Extra transaction extension data that starts with `CommonTransactionExtra`.
pub type SuffixedCommonTransactionExtension<Suffix> =
GenericTransactionExtension<(CommonTransactionExtra, Suffix)>;
/// Helper trait to define some extra methods on `SuffixedCommonTransactionExtension`.
pub trait SuffixedCommonTransactionExtensionExt<Suffix: TransactionExtensionSchema> {
/// Create signed extension from its components.
fn from_params(
spec_version: u32,
transaction_version: u32,
era: TransactionEra<BlockNumber, Hash>,
genesis_hash: Hash,
nonce: Nonce,
tip: Balance,
extra: (Suffix::Payload, Suffix::Implicit),
) -> Self;
/// Return transaction nonce.
fn nonce(&self) -> Nonce;
/// Return transaction tip.
fn tip(&self) -> Balance;
}
impl<Suffix> SuffixedCommonTransactionExtensionExt<Suffix>
for SuffixedCommonTransactionExtension<Suffix>
where
Suffix: TransactionExtensionSchema,
{
fn from_params(
spec_version: u32,
transaction_version: u32,
era: TransactionEra<BlockNumber, Hash>,
genesis_hash: Hash,
nonce: Nonce,
tip: Balance,
extra: (Suffix::Payload, Suffix::Implicit),
) -> Self {
GenericTransactionExtension::new(
(
(
(), // non-zero sender
(), // spec version
(), // tx version
(), // genesis
era.frame_era(), // era
nonce.into(), // nonce (compact encoding)
(), // Check weight
tip.into(), // transaction payment / tip (compact encoding)
),
extra.0,
),
Some((
(
(),
spec_version,
transaction_version,
genesis_hash,
era.signed_payload(genesis_hash),
(),
(),
(),
),
extra.1,
)),
)
}
fn nonce(&self) -> Nonce {
let common_payload = self.payload.0;
common_payload.5 .0
}
fn tip(&self) -> Balance {
let common_payload = self.payload.0;
common_payload.7 .0
}
}
/// Transaction extension that is used by most chains.
pub type CommonTransactionExtension = SuffixedCommonTransactionExtension<()>;
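// Minimal illustrative sketch (hypothetical values; assumes the unit suffix `()` implements
// `TransactionExtensionSchema` with unit payload and implicit data, as implied by the
// `CommonTransactionExtension` alias above): build the common transaction extension for an
// immortal transaction and read its fields back through the helper accessors.
fn _common_transaction_extension_sketch() -> CommonTransactionExtension {
    let ext = CommonTransactionExtension::from_params(
        1,                        // spec_version (hypothetical)
        1,                        // transaction_version (hypothetical)
        TransactionEra::Immortal, // era
        Hash::zero(),             // genesis_hash (hypothetical)
        0,                        // nonce
        0,                        // tip
        ((), ()),                 // empty suffix: payload + implicit data
    );
    assert_eq!(ext.nonce(), 0);
    assert_eq!(ext.tip(), 0);
    ext
}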
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn should_generate_storage_key() {
let acc = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
25, 26, 27, 28, 29, 30, 31, 32,
]
.into();
let key = AccountInfoStorageMapKeyProvider::final_key(&acc);
assert_eq!(hex::encode(key), "26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da92dccd599abfe1920a1cff8a7358231430102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20");
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Primitives of Polkadot-like chains that are related to parachains functionality.
//!
//! Even though this (bridges) repository references the polkadot repository, we can't
//! reference polkadot crates from our pallets. That's because the bridges repository is
//! included in the Cumulus repository and the included pallets are used by Cumulus
//! parachains. Pallets that reference polkadot directly could therefore pull two different
//! versions of the polkadot crates into the same runtime, which is bad.
use bp_runtime::{RawStorageProof, Size};
use codec::{CompactAs, Decode, Encode, MaxEncodedLen};
use scale_info::TypeInfo;
use sp_core::Hasher;
use sp_runtime::RuntimeDebug;
use sp_std::vec::Vec;
#[cfg(feature = "std")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use parity_util_mem::MallocSizeOf;
/// Parachain id.
///
/// This is an equivalent of the `polkadot_parachain_primitives::Id`, which is a compact-encoded
/// `u32`.
#[derive(
Clone,
CompactAs,
Copy,
Decode,
Default,
Encode,
Eq,
Hash,
MaxEncodedLen,
Ord,
PartialEq,
PartialOrd,
RuntimeDebug,
TypeInfo,
)]
pub struct ParaId(pub u32);
impl From<u32> for ParaId {
fn from(id: u32) -> Self {
ParaId(id)
}
}
/// Parachain head.
///
/// This is an equivalent of the `polkadot_parachain_primitives::HeadData`.
///
/// The parachain head is (at least in Cumulus) a SCALE-encoded parachain header.
#[derive(
PartialEq, Eq, Clone, PartialOrd, Ord, Encode, Decode, RuntimeDebug, TypeInfo, Default,
)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, MallocSizeOf))]
pub struct ParaHead(pub Vec<u8>);
impl ParaHead {
/// Returns the hash of this head data.
pub fn hash(&self) -> crate::Hash {
sp_runtime::traits::BlakeTwo256::hash(&self.0)
}
}
/// Parachain head hash.
pub type ParaHash = crate::Hash;
/// Parachain head hasher.
pub type ParaHasher = crate::Hasher;
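// Minimal illustrative sketch (assumes the `crate::Header` alias of this crate): in Cumulus the
// parachain head is just the SCALE-encoded parachain header, so the head and its hash can be
// built like this.
fn _para_head_sketch(header: &crate::Header) -> (ParaHead, ParaHash) {
    let head = ParaHead(header.encode());
    let hash = head.hash();
    (head, hash)
}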
/// Raw storage proof of parachain heads, stored in polkadot-like chain runtime.
#[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo)]
pub struct ParaHeadsProof {
/// Unverified storage proof of finalized parachain heads.
pub storage_proof: RawStorageProof,
}
impl Size for ParaHeadsProof {
fn size(&self) -> u32 {
u32::try_from(
self.storage_proof
.iter()
.fold(0usize, |sum, node| sum.saturating_add(node.len())),
)
.unwrap_or(u32::MAX)
}
}
[package]
name = "bp-relayers"
description = "Primitives of relayers module."
version = "0.7.0"
authors.workspace = true
edition.workspace = true
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
[lints]
workspace = true
[dependencies]
codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] }
scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] }
# Bridge Dependencies
bp-messages = { path = "../messages", default-features = false }
bp-runtime = { path = "../runtime", default-features = false }
# Substrate Dependencies
frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
[dev-dependencies]
hex = "0.4"
hex-literal = "0.4"
[features]
default = ["std"]
std = [
"bp-messages/std",
"bp-runtime/std",
"codec/std",
"frame-support/std",
"scale-info/std",
"sp-runtime/std",
"sp-std/std",
]
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Primitives of the relayers module.
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
pub use registration::{ExplicitOrAccountParams, Registration, StakeAndSlash};
use bp_messages::LaneId;
use bp_runtime::{ChainId, StorageDoubleMapKeyProvider};
use frame_support::{traits::tokens::Preservation, Blake2_128Concat, Identity};
use scale_info::TypeInfo;
use sp_runtime::{
codec::{Codec, Decode, Encode, EncodeLike, MaxEncodedLen},
traits::AccountIdConversion,
TypeId,
};
use sp_std::{fmt::Debug, marker::PhantomData};
mod registration;
/// The owner of the sovereign account that should pay the rewards.
///
/// Each of the 2 final chains connected by a bridge owns a sovereign account at each end of the
/// bridge. So here, at this end of the bridge, there can be 2 sovereign accounts that pay rewards.
#[derive(Copy, Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo, MaxEncodedLen)]
pub enum RewardsAccountOwner {
/// The sovereign account of the final chain on this end of the bridge.
ThisChain,
/// The sovereign account of the final chain on the other end of the bridge.
BridgedChain,
}
/// Structure used to identify the account that pays a reward to the relayer.
///
/// A bridge connects 2 bridge ends. Each one is located on a separate relay chain. The bridge ends
/// can be the final destinations of the bridge, or they can be intermediary points
/// (e.g. a bridge hub) used to forward messages between pairs of parachains on the bridged relay
/// chains. A pair of such parachains is connected using a bridge lane. Each of the 2 final
/// destinations of a bridge lane must have a sovereign account at each end of the bridge and each
/// of the sovereign accounts will pay rewards for different operations. So we need multiple
/// parameters to identify the account that pays a reward to the relayer.
#[derive(Copy, Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo, MaxEncodedLen)]
pub struct RewardsAccountParams {
lane_id: LaneId,
bridged_chain_id: ChainId,
owner: RewardsAccountOwner,
}
impl RewardsAccountParams {
/// Create a new instance of `RewardsAccountParams`.
pub const fn new(
lane_id: LaneId,
bridged_chain_id: ChainId,
owner: RewardsAccountOwner,
) -> Self {
Self { lane_id, bridged_chain_id, owner }
}
}
impl TypeId for RewardsAccountParams {
const TYPE_ID: [u8; 4] = *b"brap";
}
/// Reward payment procedure.
pub trait PaymentProcedure<Relayer, Reward> {
/// Error that may be returned by the procedure.
type Error: Debug;
/// Pay reward to the relayer from the account with provided params.
fn pay_reward(
relayer: &Relayer,
rewards_account_params: RewardsAccountParams,
reward: Reward,
) -> Result<(), Self::Error>;
}
impl<Relayer, Reward> PaymentProcedure<Relayer, Reward> for () {
type Error = &'static str;
fn pay_reward(_: &Relayer, _: RewardsAccountParams, _: Reward) -> Result<(), Self::Error> {
Ok(())
}
}
/// Reward payment procedure that makes a `balances::transfer` call from an account derived from
/// the given params.
pub struct PayRewardFromAccount<T, Relayer>(PhantomData<(T, Relayer)>);
impl<T, Relayer> PayRewardFromAccount<T, Relayer>
where
Relayer: Decode + Encode,
{
/// Return account that pays rewards based on the provided parameters.
pub fn rewards_account(params: RewardsAccountParams) -> Relayer {
params.into_sub_account_truncating(b"rewards-account")
}
}
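// Minimal illustrative usage sketch (the lane and chain identifiers are hypothetical): derive the
// sovereign account that pays rewards for a given lane and direction, as the tests at the end of
// this file do.
fn _rewards_account_sketch() -> sp_runtime::AccountId32 {
    let params = RewardsAccountParams::new(
        LaneId([0, 0, 0, 0]),           // hypothetical lane
        *b"test",                       // hypothetical bridged chain id
        RewardsAccountOwner::ThisChain, // this chain's sovereign account pays the reward
    );
    PayRewardFromAccount::<(), sp_runtime::AccountId32>::rewards_account(params)
}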
impl<T, Relayer> PaymentProcedure<Relayer, T::Balance> for PayRewardFromAccount<T, Relayer>
where
T: frame_support::traits::fungible::Mutate<Relayer>,
Relayer: Decode + Encode + Eq,
{
type Error = sp_runtime::DispatchError;
fn pay_reward(
relayer: &Relayer,
rewards_account_params: RewardsAccountParams,
reward: T::Balance,
) -> Result<(), Self::Error> {
T::transfer(
&Self::rewards_account(rewards_account_params),
relayer,
reward,
Preservation::Expendable,
)
.map(drop)
}
}
/// Can be used to access the runtime storage key within the `RelayerRewards` map of the relayers
/// pallet.
pub struct RelayerRewardsKeyProvider<AccountId, Reward>(PhantomData<(AccountId, Reward)>);
impl<AccountId, Reward> StorageDoubleMapKeyProvider for RelayerRewardsKeyProvider<AccountId, Reward>
where
AccountId: Codec + EncodeLike,
Reward: Codec + EncodeLike,
{
const MAP_NAME: &'static str = "RelayerRewards";
type Hasher1 = Blake2_128Concat;
type Key1 = AccountId;
type Hasher2 = Identity;
type Key2 = RewardsAccountParams;
type Value = Reward;
}
#[cfg(test)]
mod tests {
use super::*;
use bp_messages::LaneId;
use sp_runtime::testing::H256;
#[test]
fn different_lanes_are_using_different_accounts() {
assert_eq!(
PayRewardFromAccount::<(), H256>::rewards_account(RewardsAccountParams::new(
LaneId([0, 0, 0, 0]),
*b"test",
RewardsAccountOwner::ThisChain
)),
hex_literal::hex!("62726170000000007465737400726577617264732d6163636f756e7400000000")
.into(),
);
assert_eq!(
PayRewardFromAccount::<(), H256>::rewards_account(RewardsAccountParams::new(
LaneId([0, 0, 0, 1]),
*b"test",
RewardsAccountOwner::ThisChain
)),
hex_literal::hex!("62726170000000017465737400726577617264732d6163636f756e7400000000")
.into(),
);
}
#[test]
fn different_directions_are_using_different_accounts() {
assert_eq!(
PayRewardFromAccount::<(), H256>::rewards_account(RewardsAccountParams::new(
LaneId([0, 0, 0, 0]),
*b"test",
RewardsAccountOwner::ThisChain
)),
hex_literal::hex!("62726170000000007465737400726577617264732d6163636f756e7400000000")
.into(),
);
assert_eq!(
PayRewardFromAccount::<(), H256>::rewards_account(RewardsAccountParams::new(
LaneId([0, 0, 0, 0]),
*b"test",
RewardsAccountOwner::BridgedChain
)),
hex_literal::hex!("62726170000000007465737401726577617264732d6163636f756e7400000000")
.into(),
);
}
}