if let Some(Some(changes_update)) = changes_update {
operation.op.update_changes_trie(changes_update)?;
}
operation.op.insert_aux(aux)?;
if make_notifications {
if finalized {
operation.notify_finalized.push(hash);
}
operation.notify_imported = Some(ImportSummary {
hash,
origin,
header: import_headers.into_post(),
is_new_best,
storage_changes,
retracted,
})
}
Ok(ImportResult::imported(is_new_best))
}
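/// Execute the block's extrinsics against the parent state, when that state is available
/// in the import operation, returning the storage update, the changes trie update and the
/// collected (top, children) storage changes. Returns `(None, None, None)` when there is no
/// state to execute against.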
fn block_execution(
&self,
transaction: &B::BlockImportOperation,
import_headers: &PrePostHeader<Block::Header>,
origin: BlockOrigin,
hash: Block::Hash,
body: Option<Vec<Block::Extrinsic>>,
) -> error::Result<(
Option<StorageUpdate<B, Block>>,
Option<Option<ChangesUpdate<Block>>>,
Option<(
Vec<(Vec<u8>, Option<Vec<u8>>)>,
Vec<(Vec<u8>, Vec<(Vec<u8>, Option<Vec<u8>>)>)>
)>
)>
where
E: CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone,
{
match transaction.state()? {
Some(transaction_state) => {
let mut overlay = Default::default();
let get_execution_manager = |execution_strategy: ExecutionStrategy| {
match execution_strategy {
ExecutionStrategy::NativeElseWasm => ExecutionManager::NativeElseWasm,
ExecutionStrategy::AlwaysWasm => ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted),
ExecutionStrategy::NativeWhenPossible => ExecutionManager::NativeWhenPossible,
ExecutionStrategy::Both => ExecutionManager::Both(|wasm_result, native_result| {
let header = import_headers.post();
warn!("Consensus error between wasm and native block execution at block {}", hash);
warn!(" Header {:?}", header);
warn!(" Native result {:?}", native_result);
warn!(" Wasm result {:?}", wasm_result);
telemetry!(SUBSTRATE_INFO; "block.execute.consensus_failure";
"hash" => ?hash,
"origin" => ?origin,
"header" => ?header
);
wasm_result
}),
}
};
let (_, storage_update, changes_update) = self.executor.call_at_state::<_, _, _, NeverNativeValue, fn() -> _>(
transaction_state,
&mut overlay,
"Core_execute_block",
&<Block as BlockT>::new(import_headers.pre().clone(), body.unwrap_or_default()).encode(),
match origin {
BlockOrigin::NetworkInitialSync => get_execution_manager(self.execution_strategies().syncing),
_ => get_execution_manager(self.execution_strategies().importing),
},
None,
)?;
overlay.commit_prospective();
let (top, children) = overlay.into_committed();
let children = children.map(|(sk, it)| (sk, it.collect())).collect();
if import_headers.post().state_root() != &storage_update.1 {
return Err(error::Error::InvalidStateRoot);
}
Ok((Some(storage_update.0), Some(changes_update), Some((top.collect(), children))))
},
None => Ok((None, None, None))
}
}
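/// Apply finality to `block` within an existing import operation: check that it descends
/// from the last finalized block, mark every newly finalized block on the route, set the
/// head if the finalized block is not an ancestor of the current best, and optionally
/// queue finality notifications.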
fn apply_finality_with_block_hash(
&self,
operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
block: Block::Hash,
justification: Option<Justification>,
best_block: Block::Hash,
notify: bool,
) -> error::Result<()> {
// find tree route from last finalized to given block.
let last_finalized = self.backend.blockchain().last_finalized()?;
if block == last_finalized {
warn!("Possible safety violation: attempted to re-finalize last finalized block {:?} ", last_finalized);
return Ok(());
}
let route_from_finalized = crate::blockchain::tree_route(
|id| self.header(&id)?.ok_or_else(|| Error::UnknownBlock(format!("{:?}", id))),
BlockId::Hash(last_finalized),
BlockId::Hash(block),
)?;
if let Some(retracted) = route_from_finalized.retracted().get(0) {
warn!("Safety violation: attempted to revert finalized block {:?} which is not in the \
same chain as last finalized {:?}", retracted, last_finalized);
return Err(error::Error::NotInFinalizedChain);
}
let route_from_best = crate::blockchain::tree_route(
|id| self.header(&id)?.ok_or_else(|| Error::UnknownBlock(format!("{:?}", id))),
BlockId::Hash(best_block),
BlockId::Hash(block),
)?;
// if the block is not a direct ancestor of the current best chain,
// then some other block is the common ancestor.
if route_from_best.common_block().hash != block {
// NOTE: we're setting the finalized block as best block, this might
// be slightly inaccurate since we might have a "better" block
// further along this chain, but since best chain selection logic is
// pluggable we cannot make a better choice here. usages that need
// an accurate "best" block need to go through `SelectChain`
// instead.
operation.op.mark_head(BlockId::Hash(block))?;
}
let enacted = route_from_finalized.enacted();
assert!(enacted.len() > 0);
for finalize_new in &enacted[..enacted.len() - 1] {
operation.op.mark_finalized(BlockId::Hash(finalize_new.hash), None)?;
}
assert_eq!(enacted.last().map(|e| e.hash), Some(block));
operation.op.mark_finalized(BlockId::Hash(block), justification)?;
if notify {
// sometimes when syncing, tons of blocks can be finalized at once.
// we'll send notifications spuriously in that case.
const MAX_TO_NOTIFY: usize = 256;
let enacted = route_from_finalized.enacted();
let start = enacted.len() - ::std::cmp::min(enacted.len(), MAX_TO_NOTIFY);
for finalized in &enacted[start..] {
operation.notify_finalized.push(finalized.hash);
}
}
Ok(())
}
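/// Send finality notifications for the given hashes to all registered sinks, dropping
/// sinks whose receivers have been closed.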
fn notify_finalized(
&self,
notify_finalized: Vec<Block::Hash>,
) -> error::Result<()> {
let mut sinks = self.finality_notification_sinks.lock();
for finalized_hash in notify_finalized {
let header = self.header(&BlockId::Hash(finalized_hash))?
.expect("header already known to exist in DB because it is indicated in the tree route; qed");
telemetry!(SUBSTRATE_INFO; "notify.finalized";
"height" => format!("{}", header.number()),
"best" => ?finalized_hash,
);
let notification = FinalityNotification {
header,
hash: finalized_hash,
};
sinks.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
}
Ok(())
}
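/// Trigger storage change notifications for the imported block and broadcast the block
/// import notification to all registered sinks, dropping sinks whose receivers have been closed.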
fn notify_imported(
&self,
notify_import: ImportSummary<Block>,
) -> error::Result<()> {
if let Some(storage_changes) = notify_import.storage_changes {
// TODO [ToDr] How to handle re-orgs? Should we re-emit all storage changes?
self.storage_notifications.lock()
.trigger(
&notify_import.hash,
storage_changes.0.into_iter(),
storage_changes.1.into_iter().map(|(sk, v)| (sk, v.into_iter())),
);
}
let notification = BlockImportNotification::<Block> {
hash: notify_import.hash,
origin: notify_import.origin,
header: notify_import.header,
is_new_best: notify_import.is_new_best,
retracted: notify_import.retracted,
};
self.import_notification_sinks.lock()
.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
Ok(())
}
/// Attempts to revert the chain by `n` blocks. Returns the number of blocks that were
/// successfully reverted.
pub fn revert(&self, n: NumberFor<Block>) -> error::Result<NumberFor<Block>> {
Ok(self.backend.revert(n)?)
}
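/// Get blockchain info.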
pub fn info(&self) -> ClientInfo<Block> {
let info = self.backend.blockchain().info();
ClientInfo {
chain: info,
used_state_cache_size: self.backend.used_state_cache_size(),
}
}
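/// Get the status of a block: queued for import, part of the chain (with or without
/// state), or unknown.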
pub fn block_status(&self, id: &BlockId<Block>) -> error::Result<BlockStatus> {
// this can probably be implemented more efficiently
if let BlockId::Hash(ref h) = id {
if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) {
return Ok(BlockStatus::Queued);
}
}
let hash_and_number = match id.clone() {
BlockId::Hash(hash) => self.backend.blockchain().number(hash)?.map(|n| (hash, n)),
BlockId::Number(n) => self.backend.blockchain().hash(n)?.map(|hash| (hash, n)),
};
match hash_and_number {
Some((hash, number)) => {
if self.backend.have_state_at(&hash, number) {
Ok(BlockStatus::InChainWithState)
} else {
Ok(BlockStatus::InChainPruned)
}
}
None => Ok(BlockStatus::Unknown),
}
}
/// Get block header by id.
pub fn header(&self, id: &BlockId<Block>) -> error::Result<Option<<Block as BlockT>::Header>> {
self.backend.blockchain().header(*id)
}
/// Get block body by id.
pub fn body(&self, id: &BlockId<Block>) -> error::Result<Option<Vec<<Block as BlockT>::Extrinsic>>> {
self.backend.blockchain().body(*id)
}
/// Get block justification set by id.
pub fn justification(&self, id: &BlockId<Block>) -> error::Result<Option<Justification>> {
self.backend.blockchain().justification(*id)
}
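/// Get a full signed block (header, body and justification) by id.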
pub fn block(&self, id: &BlockId<Block>)
-> error::Result<Option<SignedBlock<Block>>>
{
Ok(match (self.header(id)?, self.body(id)?, self.justification(id)?) {
(Some(header), Some(extrinsics), justification) =>
Some(SignedBlock { block: Block::new(header, extrinsics), justification }),
_ => None,
})
}
/// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors.
pub fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor<Block>) -> error::Result<Vec<Block::Hash>> {
let load_header = |id: Block::Hash| -> error::Result<Block::Header> {
match self.backend.blockchain().header(BlockId::Hash(id))? {
Some(hdr) => Ok(hdr),
None => Err(Error::UnknownBlock(format!("{:?}", id))),
}
};
let genesis_hash = self.backend.blockchain().info().genesis_hash;
if genesis_hash == target_hash { return Ok(Vec::new()); }
let mut current_hash = target_hash;
let mut current = load_header(current_hash)?;
let mut ancestor_hash = *current.parent_hash();
let mut ancestor = load_header(ancestor_hash)?;
let mut uncles = Vec::new();
for _generation in 0..max_generation.saturated_into() {
let children = self.backend.blockchain().children(ancestor_hash)?;
uncles.extend(children.into_iter().filter(|h| h != &current_hash));
current_hash = ancestor_hash;
if genesis_hash == current_hash { break; }
current = ancestor;
ancestor_hash = *current.parent_hash();
ancestor = load_header(ancestor_hash)?;
}
trace!("Collected {} uncles", uncles.len());
Ok(uncles)
}
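/// Read and decode the changes trie configuration from the best block's state, if present.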
fn changes_trie_config(&self) -> Result<Option<ChangesTrieConfiguration>, Error> {
Ok(self.backend.state_at(BlockId::Number(self.backend.blockchain().info().best_number))?
.storage(well_known_keys::CHANGES_TRIE_CONFIG)
.map_err(|e| error::Error::from_state(Box::new(e)))?
.and_then(|c| Decode::decode(&mut &*c).ok()))
}
/// Prepare in-memory header that is used in execution environment.
fn prepare_environment_block(&self, parent: &BlockId<Block>) -> error::Result<Block::Header> {
let parent_header = self.backend.blockchain().expect_header(*parent)?;
Ok(<<Block as BlockT>::Header as HeaderT>::new(
self.backend.blockchain().expect_block_number_from_id(parent)? + One::one(),
Default::default(),
Default::default(),
parent_header.hash(),
Default::default(),
))
}
}
impl<B, E, Block, RA> ProvideUncles<Block> for Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor<Block>) -> error::Result<Vec<Block::Header>> {
Ok(Client::uncles(self, target_hash, max_generation)?
.into_iter()
.filter_map(|hash| Client::header(self, &BlockId::Hash(hash)).unwrap_or(None))
.collect()
)
}
}
impl<B, E, Block, RA> ChainHeaderBackend<Block> for Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Send + Sync,
Block: BlockT<Hash=H256>,
RA: Send + Sync
{
fn header(&self, id: BlockId<Block>) -> error::Result<Option<Block::Header>> {
self.backend.blockchain().header(id)
}
fn info(&self) -> blockchain::Info<Block> {
self.backend.blockchain().info()
}
fn status(&self, id: BlockId<Block>) -> error::Result<blockchain::BlockStatus> {
self.backend.blockchain().status(id)
}
fn number(&self, hash: Block::Hash) -> error::Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>> {
self.backend.blockchain().number(hash)
}
fn hash(&self, number: NumberFor<Block>) -> error::Result<Option<Block::Hash>> {
self.backend.blockchain().hash(number)
}
}
impl<B, E, Block, RA> ChainHeaderBackend<Block> for &Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Send + Sync,
Block: BlockT<Hash=H256>,
RA: Send + Sync,
{
fn header(&self, id: BlockId<Block>) -> error::Result<Option<Block::Header>> {
(**self).backend.blockchain().header(id)
}
fn info(&self) -> blockchain::Info<Block> {
(**self).backend.blockchain().info()
}
fn status(&self, id: BlockId<Block>) -> error::Result<blockchain::BlockStatus> {
(**self).status(id)
}
fn number(&self, hash: Block::Hash) -> error::Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>> {
(**self).number(hash)
}
fn hash(&self, number: NumberFor<Block>) -> error::Result<Option<Block::Hash>> {
(**self).hash(number)
}
}
impl<B, E, Block, RA> ProvideCache<Block> for Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
fn cache(&self) -> Option<Arc<dyn Cache<Block>>> {
self.backend.blockchain().cache()
}
}
impl<B, E, Block, RA> ProvideRuntimeApi for Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Clone + Send + Sync,
Block: BlockT<Hash=H256>,
RA: ConstructRuntimeApi<Block, Self>
{
type Api = <RA as ConstructRuntimeApi<Block, Self>>::RuntimeApi;
fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> {
RA::construct_runtime_api(self)
}
}
impl<B, E, Block, RA> CallRuntimeAt<Block> for Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Clone + Send + Sync,
Block: BlockT<Hash=H256>,
{
fn call_api_at<
'a,
R: Encode + Decode + PartialEq,
NC: FnOnce() -> result::Result<R, &'static str> + UnwindSafe,
C: CoreApi<Block>,
>(
&self,
core_api: &C,
at: &BlockId<Block>,
function: &'static str,
args: Vec<u8>,
changes: &RefCell<OverlayedChanges>,
initialize_block: InitializeBlock<'a, Block>,
native_call: Option<NC>,
context: ExecutionContext,
recorder: &Option<Rc<RefCell<ProofRecorder<Block>>>>,
) -> error::Result<NativeOrEncoded<R>> {
let manager = match context {
ExecutionContext::BlockConstruction =>
self.execution_strategies.block_construction.get_manager(),
ExecutionContext::Syncing =>
self.execution_strategies.syncing.get_manager(),
ExecutionContext::Importing =>
self.execution_strategies.importing.get_manager(),
ExecutionContext::OffchainCall(Some((_, capabilities))) if capabilities.has_all() =>
self.execution_strategies.offchain_worker.get_manager(),
ExecutionContext::OffchainCall(_) =>
self.execution_strategies.other.get_manager(),
};
let capabilities = context.capabilities();
let mut offchain_extensions = match context {
ExecutionContext::OffchainCall(ext) => ext.map(|x| x.0),
_ => None,
}.map(|ext| offchain::LimitedExternalities::new(capabilities, ext));
self.executor.contextual_call::<_, _, fn(_,_) -> _,_,_>(
|| core_api.initialize_block(at, &self.prepare_environment_block(at)?),
at,
function,
&args,
changes,
initialize_block,
manager,
native_call,
offchain_extensions.as_mut(),
recorder,
capabilities.has(offchain::Capability::Keystore),
)
}
fn runtime_version_at(&self, at: &BlockId<Block>) -> error::Result<RuntimeVersion> {
self.runtime_version_at(at)
}
}
impl<'a, B, E, Block, RA> consensus::BlockImport<Block> for &'a Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Clone + Send + Sync,
Block: BlockT<Hash=H256>,
{
type Error = ConsensusError;
/// Import a checked and validated block. If a justification is provided in
/// `BlockImportParams` then `finalized` *must* be true.
fn import_block(
&mut self,
import_block: BlockImportParams<Block>,
new_cache: HashMap<CacheKeyId, Vec<u8>>,
) -> Result<ImportResult, Self::Error> {
self.lock_import_and_run(|operation| {
self.apply_block(operation, import_block, new_cache)
}).map_err(|e| {
warn!("Block import error:\n{:?}", e);
ConsensusError::ClientImport(e.to_string()).into()
})
}
/// Check block preconditions.
fn check_block(
&mut self,
hash: Block::Hash,
parent_hash: Block::Hash,
) -> Result<ImportResult, Self::Error> {
match self.block_status(&BlockId::Hash(parent_hash))
.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
{
BlockStatus::InChainWithState | BlockStatus::Queued => {},
BlockStatus::Unknown | BlockStatus::InChainPruned => return Ok(ImportResult::UnknownParent),
BlockStatus::KnownBad => return Ok(ImportResult::KnownBad),
}
match self.block_status(&BlockId::Hash(hash))
.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
{
BlockStatus::InChainWithState | BlockStatus::Queued => return Ok(ImportResult::AlreadyInChain),
BlockStatus::Unknown | BlockStatus::InChainPruned => {},
BlockStatus::KnownBad => return Ok(ImportResult::KnownBad),
}
Ok(ImportResult::imported(false))
}
}
impl<B, E, Block, RA> consensus::BlockImport<Block> for Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Clone + Send + Sync,
Block: BlockT<Hash=H256>,
{
type Error = ConsensusError;
fn import_block(
&mut self,
import_block: BlockImportParams<Block>,
new_cache: HashMap<CacheKeyId, Vec<u8>>,
) -> Result<ImportResult, Self::Error> {
(&*self).import_block(import_block, new_cache)
}
fn check_block(
&mut self,
hash: Block::Hash,
parent_hash: Block::Hash,
) -> Result<ImportResult, Self::Error> {
(&*self).check_block(hash, parent_hash)
}
}
impl<B, E, Block, RA> Finalizer<Block, Blake2Hasher, B> for Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
fn apply_finality(
&self,
operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
id: BlockId<Block>,
justification: Option<Justification>,
notify: bool,
) -> error::Result<()> {
let last_best = self.backend.blockchain().info().best_hash;
let to_finalize_hash = self.backend.blockchain().expect_block_hash_from_id(&id)?;
self.apply_finality_with_block_hash(operation, to_finalize_hash, justification, last_best, notify)
}
fn finalize_block(&self, id: BlockId<Block>, justification: Option<Justification>, notify: bool) -> error::Result<()> {
self.lock_import_and_run(|operation| {
self.apply_finality(operation, id, justification, notify)
})
}
}
impl<B, E, Block, RA> Finalizer<Block, Blake2Hasher, B> for &Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
fn apply_finality(
&self,
operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
id: BlockId<Block>,
justification: Option<Justification>,
notify: bool,
) -> error::Result<()> {
(**self).apply_finality(operation, id, justification, notify)
}
fn finalize_block(&self, id: BlockId<Block>, justification: Option<Justification>, notify: bool) -> error::Result<()> {
(**self).finalize_block(id, justification, notify)
}
}
impl<B, E, Block, RA> BlockchainEvents<Block> for Client<B, E, Block, RA>
where
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
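/// Get a stream of block import notifications.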
fn import_notification_stream(&self) -> ImportNotifications<Block> {
let (sink, stream) = mpsc::unbounded();
self.import_notification_sinks.lock().push(sink);
stream
}
fn finality_notification_stream(&self) -> FinalityNotifications<Block> {
let (sink, stream) = mpsc::unbounded();
self.finality_notification_sinks.lock().push(sink);
stream
}
/// Get storage changes event stream.
fn storage_changes_notification_stream(
&self,
filter_keys: Option<&[StorageKey]>,
child_filter_keys: Option<&[(StorageKey, Option<Vec<StorageKey>>)]>,
) -> error::Result<StorageEventStream<Block::Hash>> {
Ok(self.storage_notifications.lock().listen(filter_keys, child_filter_keys))
}
}
/// Longest-chain `SelectChain` implementation, where 'longest' is defined as the chain
/// with the highest block number.
pub struct LongestChain<B, Block> {
backend: Arc<B>,
_phantom: PhantomData<Block>
}
impl<B, Block> Clone for LongestChain<B, Block> {
fn clone(&self) -> Self {
let backend = self.backend.clone();
LongestChain {
backend,
_phantom: Default::default()
}
}
}
impl<B, Block> LongestChain<B, Block>
where
B: backend::Backend<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
/// Instantiate a new LongestChain for Backend B
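///
/// A minimal usage sketch (assuming `backend` is an existing `Arc<B>` handle to a
/// client backend):
///
/// ```ignore
/// let select_chain = LongestChain::new(backend.clone());
/// let best_header = select_chain.best_chain()?;
/// ```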
pub fn new(backend: Arc<B>) -> Self {
LongestChain {
backend,
_phantom: Default::default()
}
}
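/// Get the header of the current best block, resolved via `best_containing` starting
/// from the backend's best hash.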
fn best_block_header(&self) -> error::Result<<Block as BlockT>::Header> {
let info = self.backend.blockchain().info();
let best_hash = self.best_containing(info.best_hash, None)?
.unwrap_or(info.best_hash);
Ok(self.backend.blockchain().header(BlockId::Hash(best_hash))?
.expect("given block hash was fetched from block in db; qed"))
}
/// Get the most recent block hash of the best (longest) chains
/// that contain block with the given `target_hash`.
///
/// The search space is always limited to blocks which are in the finalized
/// chain or descendents of it.
///
/// If `maybe_max_block_number` is `Some(max_block_number)`
/// the search is limited to block `numbers <= max_block_number`.
/// in other words, as if there were no blocks greater than `max_block_number`.
/// Returns `Ok(None)` if `target_hash` is not found in the search space.
/// TODO: document time complexity of this, see [#1444](https://github.com/paritytech/substrate/issues/1444)
fn best_containing(
&self,
target_hash: Block::Hash,
maybe_max_number: Option<NumberFor<Block>>
) -> error::Result<Option<Block::Hash>> {
let target_header = {
match self.backend.blockchain().header(BlockId::Hash(target_hash))? {
Some(x) => x,
// target not in blockchain
None => { return Ok(None); },
}
};
if let Some(max_number) = maybe_max_number {
// target outside search range
if target_header.number() > &max_number {
return Ok(None);
}
}
let leaves = {
// ensure no blocks are imported during this code block.
// an import could trigger a reorg which could change the canonical chain.
// we depend on the canonical chain staying the same during this code block.
let _import_lock = self.backend.get_import_lock().lock();
let info = self.backend.blockchain().info();
// this can be `None` if the best chain is shorter than the target header.
let maybe_canon_hash = self.backend.blockchain().hash(*target_header.number())?;
if maybe_canon_hash.as_ref() == Some(&target_hash) {
// if a `max_number` is given we try to fetch the block at the
// given depth, if it doesn't exist or `max_number` is not
// provided, we continue to search from all leaves below.
if let Some(max_number) = maybe_max_number {
if let Some(header) = self.backend.blockchain().hash(max_number)? {
return Ok(Some(header));
}
}
} else if info.finalized_number >= *target_header.number() {
// header is on a dead fork.
return Ok(None);
}
self.backend.blockchain().leaves()?
};
// for each chain. longest chain first. shortest last
for leaf_hash in leaves {
// start at the leaf
let mut current_hash = leaf_hash;
// if search is not restricted then the leaf is the best
let mut best_hash = leaf_hash;
// go backwards entering the search space
// waiting until we are <= max_number
if let Some(max_number) = maybe_max_number {
loop {
let current_header = self.backend.blockchain().header(BlockId::Hash(current_hash.clone()))?
.ok_or_else(|| error::Error::from(format!("failed to get header for hash {}", current_hash)))?;
if current_header.number() <= &max_number {
best_hash = current_header.hash();
break;
}
current_hash = *current_header.parent_hash();
}
}
// go backwards through the chain (via parent links)
loop {
// until we find target
if current_hash == target_hash {
return Ok(Some(best_hash));
}
let current_header = self.backend.blockchain().header(BlockId::Hash(current_hash.clone()))?
.ok_or_else(|| error::Error::from(format!("failed to get header for hash {}", current_hash)))?;
// stop search in this chain once we go below the target's block number
if current_header.number() < target_header.number() {
break;
}
current_hash = *current_header.parent_hash();
}
}
// header may be on a dead fork -- the only leaves that are considered are
// those which can still be finalized.
//
// FIXME #1558 only issue this warning when not on a dead fork
warn!(
"Block {:?} exists in chain but not found when following all \
leaves backwards. Number limit = {:?}",
target_hash,
maybe_max_number,
);
Ok(None)
}
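/// Get the hashes of all current leaf blocks from the backend.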
fn leaves(&self) -> Result<Vec<<Block as BlockT>::Hash>, error::Error> {
self.backend.blockchain().leaves()
}
}
impl<B, Block> SelectChain<Block> for LongestChain<B, Block>
where
B: backend::Backend<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
fn leaves(&self) -> Result<Vec<<Block as BlockT>::Hash>, ConsensusError> {
LongestChain::leaves(self)
.map_err(|e| ConsensusError::ChainLookup(e.to_string()).into())
}
fn best_chain(&self)
-> Result<<Block as BlockT>::Header, ConsensusError>
{
LongestChain::best_block_header(&self)
.map_err(|e| ConsensusError::ChainLookup(e.to_string()).into())
}
fn finality_target(
&self,
target_hash: Block::Hash,
maybe_max_number: Option<NumberFor<Block>>
) -> Result<Option<Block::Hash>, ConsensusError> {
LongestChain::best_containing(self, target_hash, maybe_max_number)
.map_err(|e| ConsensusError::ChainLookup(e.to_string()).into())
}
}
impl<B, E, Block, RA> BlockBody<Block> for Client<B, E, Block, RA>
where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
fn block_body(&self, id: &BlockId<Block>) -> error::Result<Option<Vec<<Block as BlockT>::Extrinsic>>> {
self.body(id)
}
}
impl<B, E, Block, RA> backend::AuxStore for Client<B, E, Block, RA>
where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
/// Insert auxiliary data into key-value store.
fn insert_aux<
'a,
'b: 'a,
'c: 'a,
I: IntoIterator<Item=&'a(&'c [u8], &'c [u8])>,
D: IntoIterator<Item=&'a &'b [u8]>,
>(&self, insert: I, delete: D) -> error::Result<()> {
// Import is locked here because we may have other block import
// operations that tries to set aux data. Note that for consensus
// layer, one can always use atomic operations to make sure
// import is only locked once.
self.lock_import_and_run(|operation| {
apply_aux(operation, insert, delete)
})
}
/// Query auxiliary data from key-value store.
fn get_aux(&self, key: &[u8]) -> error::Result<Option<Vec<u8>>> {
crate::backend::AuxStore::get_aux(&*self.backend, key)
}
}
impl<B, E, Block, RA> backend::AuxStore for &Client<B, E, Block, RA>
where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
fn insert_aux<
'a,
'b: 'a,
'c: 'a,
I: IntoIterator<Item=&'a(&'c [u8], &'c [u8])>,
D: IntoIterator<Item=&'a &'b [u8]>,
>(&self, insert: I, delete: D) -> error::Result<()> {
(**self).insert_aux(insert, delete)
}
fn get_aux(&self, key: &[u8]) -> error::Result<Option<Vec<u8>>> {
(**self).get_aux(key)
}
}
/// Helper function to apply auxiliary data insertion into an operation.
pub fn apply_aux<'a, 'b: 'a, 'c: 'a, B, Block, H, D, I>(
operation: &mut ClientImportOperation<Block, H, B>,
insert: I,
delete: D
) -> error::Result<()>
where
Block: BlockT,
H: Hasher<Out=Block::Hash>,
B: backend::Backend<Block, H>,
I: IntoIterator<Item=&'a(&'c [u8], &'c [u8])>,
D: IntoIterator<Item=&'a &'b [u8]>,
{
operation.op.insert_aux(
insert.into_iter()
.map(|(k, v)| (k.to_vec(), Some(v.to_vec())))
.chain(delete.into_iter().map(|k| (k.to_vec(), None)))
)
}
/// Utility methods for the client.
pub mod utils {
use super::*;
use crate::{backend::Backend, blockchain, error};
use primitives::H256;
/// Returns a function for checking block ancestry, the returned function will
/// return `true` if the given hash (second parameter) is a descendent of the
/// base (first parameter). If the `current` parameter is defined, it should
/// represent the current block `hash` and its `parent hash`, if given the
/// function that's returned will assume that `hash` isn't part of the local DB
/// yet, and all searches in the DB will instead reference the parent.
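///
/// A usage sketch (assuming `client` holds a `Client` and `base`/`hash` are hashes of
/// blocks it has already imported):
///
/// ```ignore
/// let is_descendent_of = utils::is_descendent_of(&client, None);
/// let strictly_descends = is_descendent_of(&base, &hash)?; // false when base == hash
/// ```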
pub fn is_descendent_of<'a, B, E, Block: BlockT<Hash=H256>, RA>(
client: &'a Client<B, E, Block, RA>,
current: Option<(&'a H256, &'a H256)>,
) -> impl Fn(&H256, &H256) -> Result<bool, error::Error> + 'a
where B: Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Send + Sync,
{
move |base, hash| {
if base == hash { return Ok(false); }
let mut hash = hash;
if let Some((current_hash, current_parent_hash)) = current {
if base == current_hash { return Ok(false); }
if hash == current_hash {
if base == current_parent_hash {
return Ok(true);
} else {
hash = current_parent_hash;
}
}
}
let tree_route = blockchain::tree_route(
|id| client.header(&id)?.ok_or_else(|| Error::UnknownBlock(format!("{:?}", id))),
BlockId::Hash(*hash),
BlockId::Hash(*base),
)?;
Ok(tree_route.common_block().hash == *base)
}
}
}
#[cfg(test)]
pub(crate) mod tests {
use std::collections::HashMap;
use primitives::blake2_256;
use consensus::{BlockOrigin, SelectChain};
use test_client::{
client_db::{Backend, DatabaseSettings, PruningMode},
runtime::{self, Block, Transfer, RuntimeApi, TestAPI},
};
/// Returns a tuple consisting of:
/// 1) a test client pre-filled with blocks changing balances;
/// 2) roots of changes tries for these blocks;
/// 3) test cases in the form (begin, end, key, vec![(block, extrinsic)]) that are required to pass.
pub fn prepare_client_with_key_changes() -> (
test_client::client::Client<test_client::Backend, test_client::Executor, Block, RuntimeApi>,
Vec<H256>,
Vec<(u64, u64, Vec<u8>, Vec<(u64, u32)>)>,
) {
// prepare block structure
let blocks_transfers = vec![
vec![(AccountKeyring::Alice, AccountKeyring::Dave), (AccountKeyring::Bob, AccountKeyring::Dave)],
vec![(AccountKeyring::Charlie, AccountKeyring::Eve)],
vec![(AccountKeyring::Alice, AccountKeyring::Dave)],
];
// prepare client and import blocks
let mut local_roots = Vec::new();
let remote_client = TestClientBuilder::new().set_support_changes_trie(true).build();
let mut nonces: HashMap<_, u64> = Default::default();
for (i, block_transfers) in blocks_transfers.into_iter().enumerate() {
let mut builder = remote_client.new_block(Default::default()).unwrap();
for (from, to) in block_transfers {
builder.push_transfer(Transfer {
from: from.into(),
to: to.into(),
amount: 1,
nonce: *nonces.entry(from).and_modify(|n| { *n = *n + 1 }).or_default(),
}).unwrap();
}
remote_client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
let header = remote_client.header(&BlockId::Number(i as u64 + 1)).unwrap().unwrap();
let trie_root = header.digest().log(DigestItem::as_changes_trie_root)
.map(|root| H256::from_slice(root.as_ref()))
.unwrap();
local_roots.push(trie_root);
}
// prepare test cases
let alice = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())).to_vec();
let bob = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Bob.into())).to_vec();
let charlie = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Charlie.into())).to_vec();