// Copyright 2019-2021 Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! The loop basically reads all missing headers and their finality proofs from the source client.
//! The proof for the best possible header is then submitted to the target node. The only exception
//! is the mandatory headers, which we always submit to the target node. For such headers, we
//! assume that the persistent proof either exists, or will eventually become available.
use crate::{sync_loop_metrics::SyncLoopMetrics, Error, FinalitySyncPipeline, SourceHeader};
use crate::{
base::SourceClientBase,
finality_proofs::{FinalityProofsBuf, FinalityProofsStream},
headers::{JustifiedHeader, JustifiedHeaderSelector},
};
use async_trait::async_trait;
use backoff::{backoff::Backoff, ExponentialBackoff};
use futures::{future::Fuse, select, Future, FutureExt};
use num_traits::Saturating;
use relay_utils::{
metrics::MetricsParams, relay_loop::Client as RelayClient, retry_backoff, FailedClient,
HeaderId, MaybeConnectionError, TrackedTransactionStatus, TransactionTracker,
};
use std::{
fmt::Debug,
time::{Duration, Instant},
};
/// Finality proof synchronization loop parameters.
#[derive(Debug, Clone)]
pub struct FinalitySyncParams {
/// Interval at which we check updates on both clients. Normally should be larger than
/// `min(source_block_time, target_block_time)`.
///
/// This parameter may be used to limit transactions rate. Increase the value and you'll get
/// infrequent updates => sparse headers => potential slow down of bridge applications, but
/// pallet storage won't be super large. Decrease the value to near `source_block_time` and
/// you'll get a transaction for (almost) every block of the source chain => all source headers
/// will be known to the target chain => bridge applications will run faster, but pallet
/// storage may explode (but if pruning is there, then it's fine).
pub tick: Duration,
/// Number of finality proofs to keep in internal buffer between loop iterations.
///
/// While in "major syncing" state, we still read finality proofs from the stream. They're
/// stored in the internal buffer between loop iterations. When we're close to the tip of the
/// chain, we may meet finality delays if headers are not finalized frequently. So instead of
/// waiting for the next finality proof to appear in the stream, we may use an existing proof
/// from that buffer.
pub recent_finality_proofs_limit: usize,
/// Timeout before we treat our transactions as lost and restart the whole sync process.
pub stall_timeout: Duration,
/// If true, only mandatory headers are relayed.
pub only_mandatory_headers: bool,
}
/// Source client used in finality synchronization loop.
#[async_trait]
pub trait SourceClient: SourceClientBase
{
/// Get best finalized block number.
async fn best_finalized_block_number(&self) -> Result;
/// Get canonical header and its finality proof by number.
async fn header_and_finality_proof(
&self,
number: P::Number,
) -> Result<(P::Header, Option), Self::Error>;
}
/// Target client used in finality synchronization loop.
#[async_trait]
pub trait TargetClient: RelayClient {
/// Transaction tracker to track submitted transactions.
type TransactionTracker: TransactionTracker;
/// Get best finalized source block number.
async fn best_finalized_source_block_id(
&self,
) -> Result, Self::Error>;
/// Submit header finality proof.
async fn submit_finality_proof(
&self,
header: P::Header,
proof: P::FinalityProof,
) -> Result;
}
/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs
/// sync loop.
pub fn metrics_prefix() -> String {
format!("{}_to_{}_Sync", P::SOURCE_NAME, P::TARGET_NAME)
}
pub struct SyncInfo {
pub best_number_at_source: P::Number,
pub best_number_at_target: P::Number,
pub is_using_same_fork: bool,
}
impl SyncInfo
{
/// Checks if both clients are on the same fork.
async fn is_on_same_fork>(
source_client: &SC,
id_at_target: &HeaderId,
) -> Result {
let header_at_source = source_client.header_and_finality_proof(id_at_target.0).await?.0;
let header_hash_at_source = header_at_source.hash();
Ok(if id_at_target.1 == header_hash_at_source {
true
} else {
log::error!(
target: "bridge",
"Source node ({}) and pallet at target node ({}) have different headers at the same height {:?}: \
at-source {:?} vs at-target {:?}",
P::SOURCE_NAME,
P::TARGET_NAME,
id_at_target.0,
header_hash_at_source,
id_at_target.1,
);
false
})
}
async fn new, TC: TargetClient
>(
source_client: &SC,
target_client: &TC,
) -> Result> {
let best_number_at_source =
source_client.best_finalized_block_number().await.map_err(Error::Source)?;
let best_id_at_target =
target_client.best_finalized_source_block_id().await.map_err(Error::Target)?;
let best_number_at_target = best_id_at_target.0;
let is_using_same_fork = Self::is_on_same_fork(source_client, &best_id_at_target)
.await
.map_err(Error::Source)?;
Ok(Self { best_number_at_source, best_number_at_target, is_using_same_fork })
}
fn update_metrics(&self, metrics_sync: &Option) {
if let Some(metrics_sync) = metrics_sync {
metrics_sync.update_best_block_at_source(self.best_number_at_source);
metrics_sync.update_best_block_at_target(self.best_number_at_target);
metrics_sync.update_using_same_fork(self.is_using_same_fork);
}
}
pub fn num_headers(&self) -> P::Number {
self.best_number_at_source.saturating_sub(self.best_number_at_target)
}
}
/// Information about transaction that we have submitted.
#[derive(Debug, Clone)]
pub struct Transaction<Tracker, Number> {
	/// Submitted transaction tracker.
	tracker: Tracker,
	/// The number of the header we have submitted.
	header_number: Number,
}
impl Transaction {
pub async fn submit<
P: FinalitySyncPipeline,
TC: TargetClient
,
>(
target_client: &TC,
header: P::Header,
justification: P::FinalityProof,
) -> Result {
let header_number = header.number();
log::debug!(
target: "bridge",
"Going to submit finality proof of {} header #{:?} to {}",
P::SOURCE_NAME,
header_number,
P::TARGET_NAME,
);
let tracker = target_client.submit_finality_proof(header, justification).await?;
Ok(Transaction { tracker, header_number })
}
async fn track<
P: FinalitySyncPipeline,
SC: SourceClient
> {
match self.tracker.wait().await {
TrackedTransactionStatus::Finalized(_) => {
// The transaction has been finalized, but it may have been finalized in the
// "failed" state. So let's check if the block number was actually updated.
target_client
.best_finalized_source_block_id()
.await
.map_err(Error::Target)
.and_then(|best_id_at_target| {
if self.header_number > best_id_at_target.0 {
return Err(Error::ProofSubmissionTxFailed {
submitted_number: self.header_number,
best_number_at_target: best_id_at_target.0,
})
}
Ok(())
})
},
TrackedTransactionStatus::Lost => Err(Error::ProofSubmissionTxLost),
}
}
}
/// Finality synchronization loop state.
struct FinalityLoop, TC: TargetClient