// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Substrate Finality Verifier Pallet
//!
//! The goal of this pallet is to provide a safe interface for writing finalized headers to an
//! external pallet which tracks headers and finality proofs. By safe, we mean that only headers
//! whose finality has been verified will be written to the underlying pallet.
//!
//! By verifying the finality of headers before writing them to storage we prevent DoS vectors in
//! which unfinalized headers get written to storage even if they don't have a chance of being
//! finalized in the future (such as in the case where a different fork gets finalized).
//!
//! The underlying pallet used for storage is assumed to be a pallet which tracks headers and
//! GRANDPA authority set changes. This information is used during the verification of GRANDPA
//! finality proofs.
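//!
//! Below is a minimal sketch of how a runtime could configure this pallet, assuming the crate is
//! imported as `pallet_finality_verifier` and that the runtime already provides a `Chain`
//! implementation, an ancestry proof type and an `AncestryChecker` for the bridged chain. The
//! `Rialto*` names are illustrative placeholders, not types exported by this crate.
//!
//! ```ignore
//! use frame_support::parameter_types;
//!
//! parameter_types! {
//!     // Upper bound on header-submission requests that may be in flight at once.
//!     pub const MaxRialtoRequests: u32 = 50;
//! }
//!
//! impl pallet_finality_verifier::Config for Runtime {
//!     // The chain whose finalized headers are imported by this pallet.
//!     type BridgedChain = RialtoChain;
//!     // The pallet which stores headers and GRANDPA authority set changes.
//!     type HeaderChain = PalletBridgeRialto;
//!     // Proof relating a submitted header to the best finalized header in storage.
//!     type AncestryProof = RialtoAncestryProof;
//!     type AncestryChecker = RialtoAncestryChecker;
//!     type MaxRequests = MaxRialtoRequests;
//! }
//! ```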
#![cfg_attr(not(feature = "std"), no_std)]
// Runtime-generated enums
#![allow(clippy::large_enum_variant)]
use bp_header_chain::{justification::verify_justification, AncestryChecker, HeaderChain};
use bp_runtime::{BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf};
use codec::{Decode, Encode};
use finality_grandpa::voter_set::VoterSet;
use frame_support::{dispatch::DispatchError, ensure};
use frame_system::{ensure_signed, RawOrigin};
#[cfg(feature = "std")]
use serde::{Deserialize, Serialize};
use sp_finality_grandpa::{ConsensusLog, GRANDPA_ENGINE_ID};
use sp_runtime::traits::{BadOrigin, Header as HeaderT, Zero};
use sp_runtime::RuntimeDebug;
use sp_std::vec::Vec;
// Re-export in crate namespace for `construct_runtime!`
pub use pallet::*;
/// Block number of the bridged chain.
pub type BridgedBlockNumber<T> = BlockNumberOf<<T as Config>::BridgedChain>;
/// Block hash of the bridged chain.
pub type BridgedBlockHash<T> = HashOf<<T as Config>::BridgedChain>;
/// Hasher of the bridged chain.
pub type BridgedBlockHasher<T> = HasherOf<<T as Config>::BridgedChain>;
/// Header of the bridged chain.
pub type BridgedHeader<T> = HeaderOf<<T as Config>::BridgedChain>;
#[frame_support::pallet]
pub mod pallet {
use super::*;
use frame_support::pallet_prelude::*;
use frame_system::pallet_prelude::*;
#[pallet::config]
pub trait Config: frame_system::Config {
/// The chain we are bridging to here.
type BridgedChain: Chain;
/// The pallet which we will use as our underlying storage mechanism.
type HeaderChain: HeaderChain<<Self::BridgedChain as Chain>::Header, DispatchError>;
/// The type of ancestry proof used by the pallet.
///
/// Will be used by the ancestry checker to verify that the header being finalized is
/// related to the best finalized header in storage.
type AncestryProof: Parameter;
/// The type through which we will verify that a given header is related to the last
/// finalized header in our storage pallet.
type AncestryChecker: AncestryChecker<<Self::BridgedChain as Chain>::Header, Self::AncestryProof>;
/// The upper bound on the number of requests allowed by the pallet.
///
/// A request refers to an action which writes a header to storage.
///
/// Once this bound is reached the pallet will not allow any dispatchables to be called
/// until the request count has decreased.
#[pallet::constant]
type MaxRequests: Get<u32>;
}
#[pallet::pallet]
pub struct Pallet<T>(PhantomData<T>);
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn on_initialize(_n: T::BlockNumber) -> frame_support::weights::Weight {
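// Decrease the request count by one each block so the `MaxRequests` limit recovers over time.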
<RequestCount<T>>::mutate(|count| *count = count.saturating_sub(1));
(0_u64)
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
}
}
#[pallet::call]
impl<T: Config> Pallet<T> {
/// Verify a target header is finalized according to the given finality proof.
/// It will use the underlying storage pallet to fetch information about the current
/// authorities and best finalized header in order to verify that the header is finalized.
///
/// If successful in verification, it will write the target header to the underlying storage
/// pallet.
#[pallet::weight(0)]
pub fn submit_finality_proof(
origin: OriginFor<T>,
finality_target: BridgedHeader<T>,
justification: Vec<u8>,
ancestry_proof: T::AncestryProof,
) -> DispatchResultWithPostInfo {
ensure_operational::<T>()?;
let _ = ensure_signed(origin)?;
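// Rate limiting: refuse new header submissions once `MaxRequests` requests are already pending.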
ensure!(
Self::request_count() < T::MaxRequests::get(),
<Error<T>>::TooManyRequests
);
let (hash, number) = (finality_target.hash(), finality_target.number());
log::trace!("Going to try and finalize header {:?}", finality_target);
let best_finalized = <ImportedHeaders<T>>::get(<BestFinalized<T>>::get()).expect(
"In order to reach this point the bridge must have been initialized. Afterwards,
every time `BestFinalized` is updated `ImportedHeaders` is also updated. Therefore
`ImportedHeaders` must contain an entry for `BestFinalized`.",
);
// We do a quick check here to ensure that our header chain is making progress and isn't
// "travelling back in time" (which could be indicative of something bad, e.g a hard-fork).
ensure!(best_finalized.number() < number, <Error<T>>::OldHeader);
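// Verify the GRANDPA justification for the target header against the authority set currently tracked by this pallet.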
let authority_set = <CurrentAuthoritySet<T>>::get();
let voter_set = VoterSet::new(authority_set.authorities).ok_or(<Error<T>>::InvalidAuthoritySet)?;
let set_id = authority_set.set_id;
verify_justification::<BridgedHeader<T>>((hash, *number), set_id, voter_set, &justification).map_err(
|e| {
log::error!("Received invalid justification for {:?}: {:?}", finality_target, e);
<Error<T>>::InvalidJustification
},
)?;
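// The justification is valid; now check that the target header actually descends from the best finalized header known to the underlying storage pallet.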
let best_finalized = T::HeaderChain::best_finalized();
log::trace!("Checking ancestry against best finalized header: {:?}", &best_finalized);
ensure!(
T::AncestryChecker::are_ancestors(&best_finalized, &finality_target, &ancestry_proof),
<Error<T>>::InvalidAncestryProof
);
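// All checks passed: write the header to the underlying storage pallet and record the import locally.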
let _ = T::HeaderChain::append_header(finality_target.clone())?;
import_header::<T>(hash, finality_target)?;
<RequestCount<T>>::mutate(|count| *count += 1);
log::info!("Succesfully imported finalized header with hash {:?}!", hash);
Ok(().into())
}
/// Bootstrap the bridge pallet with an initial header and authority set from which to sync.
///
/// The initial configuration provided does not need to be the genesis header of the bridged
/// chain, it can be any arbitrary header. You can also provide the next scheduled set change
/// if it is already known.
///
/// This function is only allowed to be called from a trusted origin and writes to storage
/// with practically no checks in terms of the validity of the data. It is important that
/// you ensure that valid data is being passed in.
#[pallet::weight((T::DbWeight::get().reads_writes(2, 5), DispatchClass::Operational))]
pub fn initialize(
origin: OriginFor<T>,
init_data: super::InitializationData<BridgedHeader<T>>,
) -> DispatchResultWithPostInfo {
ensure_owner_or_root::<T>(origin)?;
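// The bridge can only be initialized once; if `BestFinalized` already exists, initialization has happened before.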
let init_allowed = !<BestFinalized<T>>::exists();
ensure!(init_allowed, <Error<T>>::AlreadyInitialized);
initialize_bridge::<T>(init_data.clone());
"Pallet has been initialized with the following parameters: {:?}",
init_data
);