let (_, storage_update, changes_update) = self.executor
.call_at_state::<_, _, NeverNativeValue, fn() -> _>(
transaction_state,
&mut overlay,
"Core_execute_block",
&encoded_block,
match origin {
BlockOrigin::NetworkInitialSync => get_execution_manager(
self.execution_extensions().strategies().syncing,
),
_ => get_execution_manager(self.execution_extensions().strategies().importing),
},
None,
None,
)?;
overlay.commit_prospective();
let (top, children) = overlay.into_committed();
let children = children.map(|(sk, it)| (sk, it.0.collect())).collect();
if import_headers.post().state_root() != &storage_update.1 {
return Err(sp_blockchain::Error::InvalidStateRoot);
}
Ok((Some(storage_update.0), Some(changes_update), Some((top.collect(), children))))
},
None => Ok((None, None, None))
}
}
fn apply_finality_with_block_hash(
&self,
operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
block: Block::Hash,
justification: Option<Justification>,
best_block: Block::Hash,
notify: bool,
) -> sp_blockchain::Result<()> {
// find tree route from last finalized to given block.
let last_finalized = self.backend.blockchain().last_finalized()?;
if block == last_finalized {
warn!("Possible safety violation: attempted to re-finalize last finalized block {:?} ", last_finalized);
return Ok(());
}
let route_from_finalized = sp_blockchain::tree_route(self.backend.blockchain(), last_finalized, block)?;
if let Some(retracted) = route_from_finalized.retracted().get(0) {
warn!("Safety violation: attempted to revert finalized block {:?} which is not in the \
same chain as last finalized {:?}", retracted, last_finalized);
return Err(sp_blockchain::Error::NotInFinalizedChain);
}
let route_from_best = sp_blockchain::tree_route(self.backend.blockchain(), best_block, block)?;
// if the block is not a direct ancestor of the current best chain,
// then some other block is the common ancestor.
if route_from_best.common_block().hash != block {
// NOTE: we're setting the finalized block as best block, this might
// be slightly inaccurate since we might have a "better" block
// further along this chain, but since best chain selection logic is
// pluggable we cannot make a better choice here. usages that need
// an accurate "best" block need to go through `SelectChain`
// instead.
operation.op.mark_head(BlockId::Hash(block))?;
}
let enacted = route_from_finalized.enacted();
assert!(enacted.len() > 0);
for finalize_new in &enacted[..enacted.len() - 1] {
operation.op.mark_finalized(BlockId::Hash(finalize_new.hash), None)?;
}
assert_eq!(enacted.last().map(|e| e.hash), Some(block));
operation.op.mark_finalized(BlockId::Hash(block), justification)?;
if notify {
// sometimes when syncing, tons of blocks can be finalized at once.
// we'll send notifications spuriously in that case.
const MAX_TO_NOTIFY: usize = 256;
let enacted = route_from_finalized.enacted();
let start = enacted.len() - ::std::cmp::min(enacted.len(), MAX_TO_NOTIFY);
for finalized in &enacted[start..] {
operation.notify_finalized.push(finalized.hash);
}
}
Ok(())
}
fn notify_finalized(
&self,
notify_finalized: Vec<Block::Hash>,
) -> sp_blockchain::Result<()> {
let mut sinks = self.finality_notification_sinks.lock();
for finalized_hash in notify_finalized {
let header = self.header(&BlockId::Hash(finalized_hash))?
.expect("header already known to exist in DB because it is indicated in the tree route; qed");
telemetry!(SUBSTRATE_INFO; "notify.finalized";
"height" => format!("{}", header.number()),
"best" => ?finalized_hash,
);
let notification = FinalityNotification {
header,
hash: finalized_hash,
};
sinks.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
}
Ok(())
}
fn notify_imported(&self, notify_import: ImportSummary<Block>) -> sp_blockchain::Result<()> {
if let Some(storage_changes) = notify_import.storage_changes {
// TODO [ToDr] How to handle re-orgs? Should we re-emit all storage changes?
self.storage_notifications.lock()
.trigger(
&notify_import.hash,
storage_changes.0.into_iter(),
storage_changes.1.into_iter().map(|(sk, v)| (sk, v.into_iter())),
);
}
let notification = BlockImportNotification::<Block> {
hash: notify_import.hash,
origin: notify_import.origin,
header: notify_import.header,
is_new_best: notify_import.is_new_best,
retracted: notify_import.retracted,
};
self.import_notification_sinks.lock()
.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
Ok(())
}
/// Attempts to revert the chain by `n` blocks. Returns the number of blocks that were
/// successfully reverted.
pub fn revert(&self, n: NumberFor<Block>) -> sp_blockchain::Result<NumberFor<Block>> {
/// Get usage info about current client.
pub fn usage_info(&self) -> ClientInfo<Block> {
used_state_cache_size: self.backend.used_state_cache_size(),
/// Get blockchain info.
pub fn chain_info(&self) -> blockchain::Info<Block> {
self.backend.blockchain().info()
}
pub fn block_status(&self, id: &BlockId<Block>) -> sp_blockchain::Result<BlockStatus> {
// this can probably be implemented more efficiently
if let BlockId::Hash(ref h) = id {
if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) {
return Ok(BlockStatus::Queued);
}
}
let hash_and_number = match id.clone() {
BlockId::Hash(hash) => self.backend.blockchain().number(hash)?.map(|n| (hash, n)),
BlockId::Number(n) => self.backend.blockchain().hash(n)?.map(|hash| (hash, n)),
};
match hash_and_number {
Some((hash, number)) => {
if self.backend.have_state_at(&hash, number) {
Ok(BlockStatus::InChainWithState)
} else {
Ok(BlockStatus::InChainPruned)
}
}
None => Ok(BlockStatus::Unknown),
}
}
/// Get block header by id.
pub fn header(&self, id: &BlockId<Block>) -> sp_blockchain::Result<Option<<Block as BlockT>::Header>> {
self.backend.blockchain().header(*id)
}
/// Get block body by id.
pub fn body(&self, id: &BlockId<Block>) -> sp_blockchain::Result<Option<Vec<<Block as BlockT>::Extrinsic>>> {
self.backend.blockchain().body(*id)
}
/// Get block justification set by id.
pub fn justification(&self, id: &BlockId<Block>) -> sp_blockchain::Result<Option<Justification>> {
self.backend.blockchain().justification(*id)
}
pub fn block(&self, id: &BlockId<Block>)
-> sp_blockchain::Result<Option<SignedBlock<Block>>>
{
Ok(match (self.header(id)?, self.body(id)?, self.justification(id)?) {
(Some(header), Some(extrinsics), justification) =>
Some(SignedBlock { block: Block::new(header, extrinsics), justification }),
_ => None,
})
}
/// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors.
pub fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor<Block>) -> sp_blockchain::Result<Vec<Block::Hash>> {
let load_header = |id: Block::Hash| -> sp_blockchain::Result<Block::Header> {
match self.backend.blockchain().header(BlockId::Hash(id))? {
Some(hdr) => Ok(hdr),
None => Err(Error::UnknownBlock(format!("{:?}", id))),
}
};
let genesis_hash = self.backend.blockchain().info().genesis_hash;
if genesis_hash == target_hash { return Ok(Vec::new()); }
let mut current_hash = target_hash;
let mut current = load_header(current_hash)?;
let mut ancestor_hash = *current.parent_hash();
let mut ancestor = load_header(ancestor_hash)?;
let mut uncles = Vec::new();
for _generation in 0..max_generation.saturated_into() {
let children = self.backend.blockchain().children(ancestor_hash)?;
uncles.extend(children.into_iter().filter(|h| h != &current_hash));
current_hash = ancestor_hash;
if genesis_hash == current_hash { break; }
current = ancestor;
ancestor_hash = *current.parent_hash();
ancestor = load_header(ancestor_hash)?;
}
trace!("Collected {} uncles", uncles.len());
Ok(uncles)
}
fn changes_trie_config(&self) -> Result<Option<ChangesTrieConfiguration>, Error> {
Ok(self.backend.state_at(BlockId::Number(self.backend.blockchain().info().best_number))?
.storage(well_known_keys::CHANGES_TRIE_CONFIG)
.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
.and_then(|c| Decode::decode(&mut &*c).ok()))
}
/// Prepare in-memory header that is used in execution environment.
fn prepare_environment_block(&self, parent: &BlockId<Block>) -> sp_blockchain::Result<Block::Header> {
let parent_header = self.backend.blockchain().expect_header(*parent)?;
Ok(<<Block as BlockT>::Header as HeaderT>::new(
self.backend.blockchain().expect_block_number_from_id(parent)? + One::one(),
Default::default(),
Default::default(),
parent_header.hash(),
Default::default(),
))
}
}
impl<B, E, Block, RA> HeaderMetadata<Block> for Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
type Error = sp_blockchain::Error;
fn header_metadata(&self, hash: Block::Hash) -> Result<CachedHeaderMetadata<Block>, Self::Error> {
self.backend.blockchain().header_metadata(hash)
}
fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata<Block>) {
self.backend.blockchain().insert_header_metadata(hash, metadata)
}
fn remove_header_metadata(&self, hash: Block::Hash) {
self.backend.blockchain().remove_header_metadata(hash)
}
}
impl<B, E, Block, RA> ProvideUncles<Block> for Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor<Block>) -> sp_blockchain::Result<Vec<Block::Header>> {
Ok(Client::uncles(self, target_hash, max_generation)?
.into_iter()
.filter_map(|hash| Client::header(self, &BlockId::Hash(hash)).unwrap_or(None))
.collect()
)
}
}
impl<B, E, Block, RA> ChainHeaderBackend<Block> for Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Send + Sync,
Block: BlockT<Hash=H256>,
RA: Send + Sync,
{
fn header(&self, id: BlockId<Block>) -> sp_blockchain::Result<Option<Block::Header>> {
self.backend.blockchain().header(id)
}
fn info(&self) -> blockchain::Info<Block> {
self.backend.blockchain().info()
}
fn status(&self, id: BlockId<Block>) -> sp_blockchain::Result<blockchain::BlockStatus> {
self.backend.blockchain().status(id)
}
fn number(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>> {
self.backend.blockchain().number(hash)
}
fn hash(&self, number: NumberFor<Block>) -> sp_blockchain::Result<Option<Block::Hash>> {
self.backend.blockchain().hash(number)
}
}
impl<B, E, Block, RA> sp_runtime::traits::BlockIdTo<Block> for Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Send + Sync,
Block: BlockT<Hash=H256>,
RA: Send + Sync,
{
type Error = Error;
fn to_hash(&self, block_id: &BlockId<Block>) -> sp_blockchain::Result<Option<Block::Hash>> {
self.block_hash_from_id(block_id)
}
fn to_number(&self, block_id: &BlockId<Block>) -> sp_blockchain::Result<Option<NumberFor<Block>>> {
self.block_number_from_id(block_id)
}
}
impl<B, E, Block, RA> ChainHeaderBackend<Block> for &Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Send + Sync,
Block: BlockT<Hash=H256>,
RA: Send + Sync,
{
fn header(&self, id: BlockId<Block>) -> sp_blockchain::Result<Option<Block::Header>> {
(**self).backend.blockchain().header(id)
}
fn info(&self) -> blockchain::Info<Block> {
(**self).backend.blockchain().info()
}
fn status(&self, id: BlockId<Block>) -> sp_blockchain::Result<blockchain::BlockStatus> {
(**self).status(id)
}
fn number(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>> {
(**self).number(hash)
}
fn hash(&self, number: NumberFor<Block>) -> sp_blockchain::Result<Option<Block::Hash>> {
(**self).hash(number)
}
}
impl<B, E, Block, RA> ProvideCache<Block> for Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
fn cache(&self) -> Option<Arc<dyn Cache<Block>>> {
self.backend.blockchain().cache()
}
}
impl<B, E, Block, RA> ProvideRuntimeApi for Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Clone + Send + Sync,
Block: BlockT<Hash=H256>,
RA: ConstructRuntimeApi<Block, Self>
{
type Api = <RA as ConstructRuntimeApi<Block, Self>>::RuntimeApi;
fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> {
RA::construct_runtime_api(self)
}
}
impl<B, E, Block, RA> CallRuntimeAt<Block> for Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Clone + Send + Sync,
Block: BlockT<Hash=H256>,
{
type Error = Error;
fn call_api_at<
NC: FnOnce() -> result::Result<R, String> + UnwindSafe,
C: CoreApi<Block, Error = Error>,
>(
&self,
at: &BlockId<Block>,
function: &'static str,
args: Vec<u8>,
changes: &RefCell<OverlayedChanges>,
initialize_block: InitializeBlock<'a, Block>,
native_call: Option<NC>,
recorder: &Option<ProofRecorder<Block>>,
) -> sp_blockchain::Result<NativeOrEncoded<R>> {
let (manager, extensions) = self.execution_extensions.manager_and_extensions(at, context);
self.executor.contextual_call::<_, fn(_,_) -> _,_,_>(
|| core_api.initialize_block(at, &self.prepare_environment_block(at)?),
at,
function,
&args,
changes,
fn runtime_version_at(&self, at: &BlockId<Block>) -> sp_blockchain::Result<RuntimeVersion> {
self.runtime_version_at(at)
}
}
/// NOTE: only use this implementation when you are sure there are NO consensus-level BlockImport
/// objects. Otherwise, importing blocks directly into the client would be bypassing
/// important verification work.
impl<'a, B, E, Block, RA> sp_consensus::BlockImport<Block> for &'a Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Clone + Send + Sync,
Block: BlockT<Hash=H256>,
{
type Error = ConsensusError;
/// Import a checked and validated block. If a justification is provided in
/// `BlockImportParams` then `finalized` *must* be true.
///
/// NOTE: only use this implementation when there are NO consensus-level BlockImport
/// objects. Otherwise, importing blocks directly into the client would be bypassing
/// important verification work.
///
/// If you are not sure that there are no BlockImport objects provided by the consensus
/// algorithm, don't use this function.
fn import_block(
&mut self,
import_block: BlockImportParams<Block>,
new_cache: HashMap<CacheKeyId, Vec<u8>>,
) -> Result<ImportResult, Self::Error> {
self.lock_import_and_run(|operation| {
self.apply_block(operation, import_block, new_cache)
}).map_err(|e| {
warn!("Block import error:\n{:?}", e);
ConsensusError::ClientImport(e.to_string()).into()
})
}
/// Check block preconditions.
fn check_block(
&mut self,
block: BlockCheckParams<Block>,
) -> Result<ImportResult, Self::Error> {
let BlockCheckParams { hash, number, parent_hash, allow_missing_state, import_existing } = block;
let fork_block = self.fork_blocks.as_ref()
.and_then(|fs| fs.iter().find(|(n, _)| *n == number));
if let Some((_, h)) = fork_block {
if &hash != h {
trace!(
"Rejecting block from known invalid fork. Got {:?}, expected: {:?} at height {}",
hash,
h,
number
);
return Ok(ImportResult::KnownBad);
}
}
// Own status must be checked first. If the block and ancestry is pruned
// this function must return `AlreadyInChain` rather than `MissingState`
match self.block_status(&BlockId::Hash(hash))
.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
{
BlockStatus::InChainWithState | BlockStatus::Queued if !import_existing => return Ok(ImportResult::AlreadyInChain),
BlockStatus::InChainWithState | BlockStatus::Queued => {},
BlockStatus::InChainPruned => return Ok(ImportResult::AlreadyInChain),
BlockStatus::Unknown => {},
BlockStatus::KnownBad => return Ok(ImportResult::KnownBad),
}
match self.block_status(&BlockId::Hash(parent_hash))
.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
{
BlockStatus::InChainWithState | BlockStatus::Queued => {},
BlockStatus::Unknown => return Ok(ImportResult::UnknownParent),
BlockStatus::InChainPruned if allow_missing_state => {},
BlockStatus::InChainPruned => return Ok(ImportResult::MissingState),
BlockStatus::KnownBad => return Ok(ImportResult::KnownBad),
}
Ok(ImportResult::imported(false))
}
}
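// Illustrative sketch, not part of the original file: driving the `BlockImport`
// impl above from calling code. The helper name `example_import_block` and its
// bounds are assumptions that simply mirror the impl; a real caller would receive
// an already verified `BlockImportParams` from its consensus layer.
fn example_import_block<B, E, Block, RA>(
	client: &Client<B, E, Block, RA>,
	verified_block: BlockImportParams<Block>,
) -> Result<ImportResult, ConsensusError> where
	B: backend::Backend<Block, Blake2Hasher>,
	E: CallExecutor<Block, Blake2Hasher> + Clone + Send + Sync,
	Block: BlockT<Hash=H256>,
{
	// `&Client` is itself the `BlockImport` implementor, so the import goes through
	// a mutable binding of the shared reference; no extra cache entries are supplied.
	let mut import = client;
	sp_consensus::BlockImport::import_block(&mut import, verified_block, HashMap::new())
}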
impl<B, E, Block, RA> sp_consensus::BlockImport<Block> for Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Clone + Send + Sync,
Block: BlockT<Hash=H256>,
{
type Error = ConsensusError;
fn import_block(
&mut self,
import_block: BlockImportParams<Block>,
new_cache: HashMap<CacheKeyId, Vec<u8>>,
) -> Result<ImportResult, Self::Error> {
(&*self).import_block(import_block, new_cache)
}
fn check_block(
&mut self,
block: BlockCheckParams<Block>,
) -> Result<ImportResult, Self::Error> {
(&*self).check_block(block)
}
}
impl<B, E, Block, RA> Finalizer<Block, Blake2Hasher, B> for Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
fn apply_finality(
&self,
operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
id: BlockId<Block>,
justification: Option<Justification>,
notify: bool,
) -> sp_blockchain::Result<()> {
let last_best = self.backend.blockchain().info().best_hash;
let to_finalize_hash = self.backend.blockchain().expect_block_hash_from_id(&id)?;
self.apply_finality_with_block_hash(operation, to_finalize_hash, justification, last_best, notify)
}
fn finalize_block(&self, id: BlockId<Block>, justification: Option<Justification>, notify: bool) -> sp_blockchain::Result<()> {
self.lock_import_and_run(|operation| {
self.apply_finality(operation, id, justification, notify)
})
}
}
impl<B, E, Block, RA> Finalizer<Block, Blake2Hasher, B> for &Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
fn apply_finality(
&self,
operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
id: BlockId<Block>,
justification: Option<Justification>,
notify: bool,
) -> sp_blockchain::Result<()> {
(**self).apply_finality(operation, id, justification, notify)
}
fn finalize_block(&self, id: BlockId<Block>, justification: Option<Justification>, notify: bool) -> sp_blockchain::Result<()> {
(**self).finalize_block(id, justification, notify)
}
}
impl<B, E, Block, RA> BlockchainEvents<Block> for Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
fn import_notification_stream(&self) -> ImportNotifications<Block> {
let (sink, stream) = mpsc::unbounded();
self.import_notification_sinks.lock().push(sink);
stream
}
fn finality_notification_stream(&self) -> FinalityNotifications<Block> {
let (sink, stream) = mpsc::unbounded();
self.finality_notification_sinks.lock().push(sink);
stream
}
/// Get storage changes event stream.
fn storage_changes_notification_stream(
&self,
filter_keys: Option<&[StorageKey]>,
child_filter_keys: Option<&[(StorageKey, Option<Vec<StorageKey>>)]>,
) -> sp_blockchain::Result<StorageEventStream<Block::Hash>> {
Ok(self.storage_notifications.lock().listen(filter_keys, child_filter_keys))
}
}
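// Illustrative sketch, not part of the original file: subscribing to the streams
// exposed by the `BlockchainEvents` impl above. The helper name and the storage key
// used as a filter are placeholders, not anything defined by the crate.
fn example_subscribe<B, E, Block, RA>(
	client: &Client<B, E, Block, RA>,
) -> sp_blockchain::Result<StorageEventStream<Block::Hash>> where
	B: backend::Backend<Block, Blake2Hasher>,
	E: CallExecutor<Block, Blake2Hasher>,
	Block: BlockT<Hash=H256>,
{
	// Each stream stays registered in the corresponding `*_notification_sinks` list
	// until its receiving end is dropped.
	let _imports = client.import_notification_stream();
	let _finality = client.finality_notification_stream();
	// Only watch one top-level key; `None` means no child-trie filtering.
	let filter = [StorageKey(b"example-key".to_vec())];
	client.storage_changes_notification_stream(Some(&filter), None)
}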
/// `SelectChain` implementation using the longest-chain rule, where "longest"
/// is defined as the highest number of blocks.
pub struct LongestChain<B, Block> {
backend: Arc<B>,
_phantom: PhantomData<Block>
}
impl<B, Block> Clone for LongestChain<B, Block> {
fn clone(&self) -> Self {
let backend = self.backend.clone();
LongestChain {
backend,
_phantom: Default::default()
}
}
}
impl<B, Block> LongestChain<B, Block> where
B: backend::Backend<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
/// Instantiate a new LongestChain for Backend B
pub fn new(backend: Arc<B>) -> Self {
LongestChain {
backend,
_phantom: Default::default()
}
}
fn best_block_header(&self) -> sp_blockchain::Result<<Block as BlockT>::Header> {
let info = self.backend.blockchain().info();
let import_lock = self.backend.get_import_lock();
let best_hash = self.backend.blockchain().best_containing(info.best_hash, None, import_lock)?
.unwrap_or(info.best_hash);
Ok(self.backend.blockchain().header(BlockId::Hash(best_hash))?
.expect("given block hash was fetched from block in db; qed"))
}
fn leaves(&self) -> Result<Vec<<Block as BlockT>::Hash>, sp_blockchain::Error> {
self.backend.blockchain().leaves()
}
}
impl<B, Block> SelectChain<Block> for LongestChain<B, Block>
where
B: backend::Backend<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
fn leaves(&self) -> Result<Vec<<Block as BlockT>::Hash>, ConsensusError> {
LongestChain::leaves(self)
.map_err(|e| ConsensusError::ChainLookup(e.to_string()).into())
}
fn best_chain(&self)
-> Result<<Block as BlockT>::Header, ConsensusError>
{
LongestChain::best_block_header(&self)
.map_err(|e| ConsensusError::ChainLookup(e.to_string()).into())
}
fn finality_target(
&self,
target_hash: Block::Hash,
maybe_max_number: Option<NumberFor<Block>>
) -> Result<Option<Block::Hash>, ConsensusError> {
let import_lock = self.backend.get_import_lock();
self.backend.blockchain().best_containing(target_hash, maybe_max_number, import_lock)
.map_err(|e| ConsensusError::ChainLookup(e.to_string()).into())
}
}
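// Illustrative sketch, not part of the original file: wiring `LongestChain` up as a
// `SelectChain`. The helper name is a placeholder; in practice the backend is the
// same one the `Client` imports blocks into.
fn example_select_chain<B, Block>(backend: Arc<B>) -> Result<(), ConsensusError> where
	B: backend::Backend<Block, Blake2Hasher>,
	Block: BlockT<Hash=H256>,
{
	let select_chain = LongestChain::new(backend);
	// Head of the longest chain, e.g. the parent to build a new block on.
	let best_header = select_chain.best_chain()?;
	// Best block of the longest chain containing the given block;
	// `None` puts no upper bound on its number.
	let _finality_target = select_chain.finality_target(*best_header.parent_hash(), None)?;
	Ok(())
}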
impl<B, E, Block, RA> BlockBody<Block> for Client<B, E, Block, RA>
where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
fn block_body(&self, id: &BlockId<Block>) -> sp_blockchain::Result<Option<Vec<<Block as BlockT>::Extrinsic>>> {
self.body(id)
}
}
impl<B, E, Block, RA> backend::AuxStore for Client<B, E, Block, RA>
where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
/// Insert auxiliary data into key-value store.
fn insert_aux<
'a,
'b: 'a,
'c: 'a,
I: IntoIterator<Item=&'a(&'c [u8], &'c [u8])>,
D: IntoIterator<Item=&'a &'b [u8]>,
>(&self, insert: I, delete: D) -> sp_blockchain::Result<()> {
// Import is locked here because we may have other block import
// operations that tries to set aux data. Note that for consensus
// layer, one can always use atomic operations to make sure
// import is only locked once.
self.lock_import_and_run(|operation| {
apply_aux(operation, insert, delete)
})
}
/// Query auxiliary data from key-value store.
fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result<Option<Vec<u8>>> {
backend::AuxStore::get_aux(&*self.backend, key)
}
}
impl<B, E, Block, RA> backend::AuxStore for &Client<B, E, Block, RA>
where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
fn insert_aux<
'a,
'b: 'a,
'c: 'a,
I: IntoIterator<Item=&'a(&'c [u8], &'c [u8])>,
D: IntoIterator<Item=&'a &'b [u8]>,
>(&self, insert: I, delete: D) -> sp_blockchain::Result<()> {
(**self).insert_aux(insert, delete)
}
fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result<Option<Vec<u8>>> {
(**self).get_aux(key)
}
}
/// Helper function to apply auxiliary data insertion into an operation.
pub fn apply_aux<'a, 'b: 'a, 'c: 'a, B, Block, H, D, I>(
operation: &mut ClientImportOperation<Block, H, B>,
insert: I,
delete: D
) -> sp_blockchain::Result<()>
where
Block: BlockT,
H: Hasher<Out=Block::Hash>,
B: backend::Backend<Block, H>,
I: IntoIterator<Item=&'a(&'c [u8], &'c [u8])>,
D: IntoIterator<Item=&'a &'b [u8]>,
{
operation.op.insert_aux(
insert.into_iter()
.map(|(k, v)| (k.to_vec(), Some(v.to_vec())))
.chain(delete.into_iter().map(|k| (k.to_vec(), None)))
)
}
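// Illustrative sketch, not part of the original file: writing and reading auxiliary
// data through the `AuxStore` impls above. The helper name and the key/value bytes
// are placeholders.
fn example_aux_roundtrip<B, E, Block, RA>(
	client: &Client<B, E, Block, RA>,
) -> sp_blockchain::Result<()> where
	B: backend::Backend<Block, Blake2Hasher>,
	E: CallExecutor<Block, Blake2Hasher>,
	Block: BlockT<Hash=H256>,
{
	// `insert_aux` takes the import lock internally (see above), so aux writes from
	// consensus code cannot interleave with a concurrent block import.
	backend::AuxStore::insert_aux(client, &[(&b"example/key"[..], &b"value"[..])], &[])?;
	let stored = backend::AuxStore::get_aux(client, b"example/key")?;
	debug_assert_eq!(stored, Some(b"value".to_vec()));
	Ok(())
}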
impl<BE, E, B, RA> sp_consensus::block_validation::Chain<B> for Client<BE, E, B, RA>
where
BE: backend::Backend<B, Blake2Hasher>,
E: CallExecutor<B, Blake2Hasher>,
B: BlockT<Hash = H256>
{
fn block_status(&self, id: &BlockId<B>) -> Result<BlockStatus, Box<dyn std::error::Error + Send>> {
Client::block_status(self, id).map_err(|e| Box::new(e) as Box<_>)
}
}
#[cfg(test)]
pub(crate) mod tests {
use std::collections::HashMap;
use sp_core::blake2_256;
use sp_consensus::{BlockOrigin, SelectChain, BlockImport};
use substrate_test_runtime_client::{
client_ext::ClientExt,
sc_client_db::{Backend, DatabaseSettings, DatabaseSettingsSrc, PruningMode},
runtime::{self, Block, Transfer, RuntimeApi, TestAPI},
};
/// Returns tuple, consisting of:
/// 1) test client pre-filled with blocks changing balances;
/// 2) roots of changes tries for these blocks
/// 3) test cases in form (begin, end, key, vec![(block, extrinsic)]) that are required to pass
pub fn prepare_client_with_key_changes() -> (
substrate_test_runtime_client::sc_client::Client<substrate_test_runtime_client::Backend, substrate_test_runtime_client::Executor, Block, RuntimeApi>,
Vec<H256>,
Vec<(u64, u64, Vec<u8>, Vec<(u64, u32)>)>,
) {
// prepare block structure
let blocks_transfers = vec![
vec![(AccountKeyring::Alice, AccountKeyring::Dave), (AccountKeyring::Bob, AccountKeyring::Dave)],
vec![(AccountKeyring::Charlie, AccountKeyring::Eve)],
vec![(AccountKeyring::Alice, AccountKeyring::Dave)],
];
// prepare client and import blocks
let mut local_roots = Vec::new();
let remote_client = TestClientBuilder::new().set_support_changes_trie(true).build();
let mut nonces: HashMap<_, u64> = Default::default();
for (i, block_transfers) in blocks_transfers.into_iter().enumerate() {
let mut builder = remote_client.new_block(Default::default()).unwrap();
for (from, to) in block_transfers {
builder.push_transfer(Transfer {
from: from.into(),
to: to.into(),
amount: 1,
nonce: *nonces.entry(from).and_modify(|n| { *n = *n + 1 }).or_default(),
}).unwrap();
}
remote_client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
let header = remote_client.header(&BlockId::Number(i as u64 + 1)).unwrap().unwrap();
let trie_root = header.digest().log(DigestItem::as_changes_trie_root)
.map(|root| H256::from_slice(root.as_ref()))
.unwrap();
local_roots.push(trie_root);
}
// prepare test cases
let alice = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())).to_vec();
let bob = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Bob.into())).to_vec();
let charlie = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Charlie.into())).to_vec();
let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec();
let eve = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Eve.into())).to_vec();
let ferdie = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Ferdie.into())).to_vec();
let test_cases = vec![
(1, 4, alice.clone(), vec![(4, 0), (1, 0)]),
(1, 3, alice.clone(), vec![(1, 0)]),
(2, 4, alice.clone(), vec![(4, 0)]),
(2, 3, alice.clone(), vec![]),
(1, 4, bob.clone(), vec![(1, 1)]),
(1, 1, bob.clone(), vec![(1, 1)]),
(2, 4, bob.clone(), vec![]),
(1, 4, charlie.clone(), vec![(2, 0)]),
(1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]),
(1, 1, dave.clone(), vec![(1, 1), (1, 0)]),
(3, 4, dave.clone(), vec![(4, 0)]),
(1, 4, eve.clone(), vec![(2, 0)]),
(1, 1, eve.clone(), vec![]),
(3, 4, eve.clone(), vec![]),
(1, 4, ferdie.clone(), vec![]),
];
(remote_client, local_roots, test_cases)
}
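// Illustrative sketch, not part of the original file: a minimal consumer of the
// tuple returned by `prepare_client_with_key_changes` above. It only checks the
// shape of the prepared data; the actual changes-trie query API is not shown here.
#[test]
fn prepared_key_changes_have_expected_shape() {
	let (client, changes_trie_roots, test_cases) = prepare_client_with_key_changes();
	// One changes-trie root was recorded per imported block.
	assert_eq!(changes_trie_roots.len() as u64, client.chain_info().best_number);
	for (begin, end, _key, expected_changes) in test_cases {
		assert!(begin <= end);
		// Every expected (block, extrinsic) pair must fall inside the queried range.
		assert!(expected_changes.iter().all(|(block, _)| *block >= begin && *block <= end));
	}
}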
#[test]
fn client_initializes_from_genesis_ok() {
let client = substrate_test_runtime_client::new();
assert_eq!(
client.runtime_api().balance_of(
&BlockId::Number(client.chain_info().best_number),
AccountKeyring::Alice.into()
).unwrap(),
1000
);
assert_eq!(
client.runtime_api().balance_of(
&BlockId::Number(client.chain_info().best_number),
AccountKeyring::Ferdie.into()
).unwrap(),
0
);
}
#[test]
fn block_builder_works_with_no_transactions() {
let client = substrate_test_runtime_client::new();
let builder = client.new_block(Default::default()).unwrap();
client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
assert_eq!(client.chain_info().best_number, 1);
}
#[test]
fn block_builder_works_with_transactions() {
let client = substrate_test_runtime_client::new();
let mut builder = client.new_block(Default::default()).unwrap();
builder.push_transfer(Transfer {
from: AccountKeyring::Alice.into(),
to: AccountKeyring::Ferdie.into(),
amount: 42,
nonce: 0,
}).unwrap();
client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
assert_eq!(client.chain_info().best_number, 1);
assert_ne!(
client.state_at(&BlockId::Number(1)).unwrap().pairs(),
client.state_at(&BlockId::Number(0)).unwrap().pairs()
);
assert_eq!(
client.runtime_api().balance_of(
&BlockId::Number(client.chain_info().best_number),
AccountKeyring::Alice.into()
).unwrap(),
958
);
assert_eq!(
client.runtime_api().balance_of(
&BlockId::Number(client.chain_info().best_number),
AccountKeyring::Ferdie.into()
).unwrap(),
42
);
}
#[test]
fn block_builder_does_not_include_invalid() {
let client = substrate_test_runtime_client::new();
let mut builder = client.new_block(Default::default()).unwrap();
builder.push_transfer(Transfer {
from: AccountKeyring::Alice.into(),
to: AccountKeyring::Ferdie.into(),
assert!(builder.push_transfer(Transfer {
from: AccountKeyring::Eve.into(),
to: AccountKeyring::Alice.into(),
client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
assert_eq!(client.chain_info().best_number, 1);
assert_ne!(
client.state_at(&BlockId::Number(1)).unwrap().pairs(),
client.state_at(&BlockId::Number(0)).unwrap().pairs()
);
assert_eq!(client.body(&BlockId::Number(1)).unwrap().unwrap().len(), 1)
}
#[test]
fn best_containing_with_genesis_block() {
// block tree:
// G
let (client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain();
let genesis_hash = client.chain_info().genesis_hash;
assert_eq!(
genesis_hash.clone(),
longest_chain_select.finality_target(genesis_hash.clone(), None).unwrap().unwrap()
);
}
#[test]
fn best_containing_with_hash_not_found() {
// block tree:
// G
let (client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain();
let uninserted_block = client.new_block(Default::default()).unwrap().bake().unwrap();
assert_eq!(
None,
longest_chain_select.finality_target(uninserted_block.hash().clone(), None).unwrap()
);
}
#[test]
fn uncles_with_only_ancestors() {
// block tree:
// G -> A1 -> A2
let client = substrate_test_runtime_client::new();
let a1 = client.new_block(Default::default()).unwrap().bake().unwrap();