// Copyright 2017-2018 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Substrate Client
use std::{marker::PhantomData, collections::{HashSet, BTreeMap}, sync::Arc, panic::UnwindSafe, result};
use parking_lot::{Mutex, RwLock};
use futures::sync::mpsc;
use primitives::NativeOrEncoded;
use consensus::{
Error as ConsensusError, ErrorKind as ConsensusErrorKind, ImportBlock, ImportResult,
BlockOrigin, ForkChoiceStrategy
};
use runtime_primitives::traits::{
Block as BlockT, Header as HeaderT, Zero, As, NumberFor, CurrentHeight, BlockNumberToHash,
ApiRef, ProvideRuntimeApi, Digest, DigestItem, AuthorityIdFor
};
use runtime_primitives::{BuildStorage, Justification};
use runtime_primitives::generic::BlockId;
use crate::runtime_api::{CallRuntimeAt, ConstructRuntimeApi};
use primitives::{Blake2Hasher, H256, ChangesTrieConfiguration, convert_hash, NeverNativeValue};
use primitives::storage::{StorageKey, StorageData};
use primitives::storage::well_known_keys;
use codec::{Encode, Decode};
use state_machine::{
DBValue, Backend as StateBackend, CodeExecutor, ChangesTrieAnchorBlockId,
ExecutionStrategy, ExecutionManager, prove_read,
ChangesTrieRootsStorage, ChangesTrieStorage,
key_changes, key_changes_proof, OverlayedChanges,
};
use hash_db::Hasher;
use crate::backend::{self, BlockImportOperation, PrunableStateChangesTrieStorage};
use crate::blockchain::{
self, Info as ChainInfo, Backend as ChainBackend, HeaderBackend as ChainHeaderBackend
};
use crate::call_executor::{CallExecutor, LocalCallExecutor};
use crate::notifications::{StorageNotifications, StorageEventStream};
use crate::light::{call_executor::prove_execution, fetcher::ChangesProof};
use crate::cht;
use crate::error;
use crate::in_mem;
use crate::block_builder::{self, api::BlockBuilder as BlockBuilderAPI};
use crate::genesis;
use consensus;
use substrate_telemetry::telemetry;
use log::{info, trace, warn};
use error_chain::bail;
/// Type that implements `futures::Stream` of block import events.
pub type ImportNotifications<Block> = mpsc::UnboundedReceiver<BlockImportNotification<Block>>;
/// A stream of block finality notifications.
pub type FinalityNotifications<Block> = mpsc::UnboundedReceiver<FinalityNotification<Block>>;
type StorageUpdate<B, Block> = <<<B as backend::Backend<Block, Blake2Hasher>>::BlockImportOperation as BlockImportOperation<Block, Blake2Hasher>>::State as state_machine::Backend<Blake2Hasher>>::Transaction;
type ChangesUpdate = trie::MemoryDB<Blake2Hasher>;
/// Substrate Client
pub struct Client<B, E, Block, RA> where Block: BlockT {
backend: Arc<B>,
executor: E,
storage_notifications: Mutex<StorageNotifications<Block>>,
import_notification_sinks: Mutex<Vec<mpsc::UnboundedSender<BlockImportNotification<Block>>>>,
finality_notification_sinks: Mutex<Vec<mpsc::UnboundedSender<FinalityNotification<Block>>>>,
import_lock: Mutex<()>,
// holds the block hash currently being imported. TODO: replace this with block queue
importing_block: RwLock<Option<Block::Hash>>,
block_execution_strategy: ExecutionStrategy,
api_execution_strategy: ExecutionStrategy,
_phantom: PhantomData<RA>,
}
/// Client import operation, a wrapper for the backend.
pub struct ClientImportOperation<Block: BlockT, H: Hasher<Out=Block::Hash>, B: backend::Backend<Block, H>> {
op: B::BlockImportOperation,
notify_imported: Option<(Block::Hash, BlockOrigin, Block::Header, bool, Option<Vec<(Vec<u8>, Option<Vec<u8>>)>>)>,
notify_finalized: Vec<Block::Hash>,
}
/// A source of blockchain events.
pub trait BlockchainEvents<Block: BlockT> {
/// Get block import event stream. Not guaranteed to be fired for every
/// imported block.
fn import_notification_stream(&self) -> ImportNotifications<Block>;
/// Get a stream of finality notifications. Not guaranteed to be fired for every
/// finalized block.
fn finality_notification_stream(&self) -> FinalityNotifications<Block>;
/// Get storage changes event stream.
///
/// Passing `None` as `filter_keys` subscribes to all storage changes.
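///
/// A minimal usage sketch; it assumes an already constructed client implementing
/// this trait and the futures 0.1 `Stream` combinators, so it is illustrative only:
///
/// ```ignore
/// use futures::Stream;
///
/// // Only watch the well-known `:code` entry; pass `None` to watch everything.
/// let filter = [StorageKey(well_known_keys::CODE.to_vec())];
/// let stream = client.storage_changes_notification_stream(Some(&filter))?;
/// let watcher = stream.for_each(|_change_event| {
///     // React to the change set for a single block here.
///     Ok(())
/// });
/// ```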
fn storage_changes_notification_stream(&self, filter_keys: Option<&[StorageKey]>) -> error::Result<StorageEventStream<Block::Hash>>;
}
/// Chain head information.
pub trait ChainHead<Block: BlockT> {
/// Get best block header.
fn best_block_header(&self) -> Result<<Block as BlockT>::Header, error::Error>;
/// Get all leaves of the chain: block hashes that have no children currently.
/// Leaves that can never be finalized will not be returned.
fn leaves(&self) -> Result<Vec<<Block as BlockT>::Hash>, error::Error>;
}
/// Fetch block body by ID.
pub trait BlockBody<Block: BlockT> {
/// Get block body by ID. Returns `None` if the body is not stored.
fn block_body(&self, id: &BlockId<Block>) -> error::Result<Option<Vec<<Block as BlockT>::Extrinsic>>>;
}
/// Summary of chain state known to the client.
#[derive(Debug)]
pub struct ClientInfo<Block: BlockT> {
/// Best block hash and number known to the backend.
pub chain: ChainInfo<Block>,
/// Best block number in the import queue, if any.
pub best_queued_number: Option<<<Block as BlockT>::Header as HeaderT>::Number>,
}
/// Block status.
#[derive(Debug, PartialEq, Eq)]
pub enum BlockStatus {
/// Added to the import queue.
Queued,
/// Already in the blockchain.
InChain,
/// Block or parent is known to be bad.
KnownBad,
/// Not in the queue or the blockchain.
Unknown,
}
/// Summary of an imported block
#[derive(Clone, Debug)]
pub struct BlockImportNotification<Block: BlockT> {
/// Imported block header hash.
pub hash: Block::Hash,
/// Imported block origin.
pub origin: BlockOrigin,
/// Imported block header.
pub header: Block::Header,
/// Is this the new best block.
pub is_new_best: bool,
}
/// Summary of a finalized block.
#[derive(Clone, Debug)]
pub struct FinalityNotification<Block: BlockT> {
/// Imported block header hash.
pub hash: Block::Hash,
/// Imported block header.
pub header: Block::Header,
}
// used when importing a block, where additional changes are made after the runtime
// has executed.
enum PrePostHeader<H> {
// they are the same: no post-runtime digest items.
Same(H),
// different headers (pre, post).
Different(H, H),
}
impl<H> PrePostHeader<H> {
// get a reference to the "pre-header" -- the header as it should be just after the runtime.
fn pre(&self) -> &H {
match *self {
PrePostHeader::Same(ref h) => h,
PrePostHeader::Different(ref h, _) => h,
}
}
// get a reference to the "post-header" -- the header as it should be after all changes are applied.
fn post(&self) -> &H {
match *self {
PrePostHeader::Same(ref h) => h,
PrePostHeader::Different(_, ref h) => h,
}
}
// convert to the "post-header" -- the header as it should be after all changes are applied.
fn into_post(self) -> H {
match self {
PrePostHeader::Same(h) => h,
PrePostHeader::Different(_, h) => h,
}
}
}
/// Create an instance of an in-memory client.
pub fn new_in_mem<E, Block, S, RA>(
executor: E,
genesis_storage: S,
) -> error::Result<Client<in_mem::Backend<Block, Blake2Hasher>, LocalCallExecutor<in_mem::Backend<Block, Blake2Hasher>, E>, Block, RA>>
where
E: CodeExecutor<Blake2Hasher> + RuntimeInfo,
S: BuildStorage,
Block: BlockT<Hash=H256>,
{
new_with_backend(Arc::new(in_mem::Backend::new()), executor, genesis_storage)
}
/// Create a client with the explicitly provided backend.
/// This is useful for testing backend implementations.
pub fn new_with_backend<B, E, Block, S, RA>(
backend: Arc<B>,
executor: E,
build_genesis_storage: S,
) -> error::Result<Client<B, LocalCallExecutor<B, E>, Block, RA>>
where
E: CodeExecutor<Blake2Hasher> + RuntimeInfo,
S: BuildStorage,
Block: BlockT<Hash=H256>,
B: backend::LocalBackend<Block, Blake2Hasher>
{
let call_executor = LocalCallExecutor::new(backend.clone(), executor);
Client::new(backend, call_executor, build_genesis_storage, ExecutionStrategy::NativeWhenPossible, ExecutionStrategy::NativeWhenPossible)
}
impl<B, E, Block, RA> Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
{
/// Creates new Substrate Client with given blockchain and code executor.
pub fn new<S: BuildStorage>(
backend: Arc<B>,
executor: E,
build_genesis_storage: S,
block_execution_strategy: ExecutionStrategy,
api_execution_strategy: ExecutionStrategy,
) -> error::Result<Self> {
if backend.blockchain().header(BlockId::Number(Zero::zero()))?.is_none() {
let (genesis_storage, children_genesis_storage) = build_genesis_storage.build_storage()?;
let mut op = backend.begin_operation()?;
backend.begin_state_operation(&mut op, BlockId::Hash(Default::default()))?;
let state_root = op.reset_storage(genesis_storage, children_genesis_storage)?;
let genesis_block = genesis::construct_genesis_block::<Block>(state_root.into());
info!("Initialising Genesis block/state (state: {}, header-hash: {})", genesis_block.header().state_root(), genesis_block.header().hash());
op.set_block_data(
genesis_block.deconstruct().0,
Some(vec![]),
None,
crate::backend::NewBlockState::Final
)?;
backend.commit_operation(op)?;
}
Ok(Client {
backend,
executor,
storage_notifications: Default::default(),
import_notification_sinks: Default::default(),
finality_notification_sinks: Default::default(),
import_lock: Default::default(),
importing_block: Default::default(),
block_execution_strategy,
api_execution_strategy,
_phantom: Default::default(),
})
}
/// Get a reference to the state at a given block.
pub fn state_at(&self, block: &BlockId<Block>) -> error::Result<B::State> {
self.backend.state_at(*block)
}
/// Expose backend reference. To be used in tests only
pub fn backend(&self) -> &Arc<B> {
&self.backend
}
/// Return storage entry keys in state in a block of given hash with given prefix.
pub fn storage_keys(&self, id: &BlockId<Block>, key_prefix: &StorageKey) -> error::Result<Vec<StorageKey>> {
let keys = self.state_at(id)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect();
Ok(keys)
}
/// Return a single storage entry under the given key from the state of the block with the given id.
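///
/// A minimal sketch, assuming `client` is an already constructed `Client`; the
/// choice of the `:code` key is purely illustrative:
///
/// ```ignore
/// let genesis = BlockId::Number(Zero::zero());
/// // Read the raw runtime code blob stored under the well-known `:code` key.
/// let code = client.storage(&genesis, &StorageKey(well_known_keys::CODE.to_vec()))?;
/// assert!(code.is_some());
/// ```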
pub fn storage(&self, id: &BlockId<Block>, key: &StorageKey) -> error::Result<Option<StorageData>> {
Ok(self.state_at(id)?
.storage(&key.0).map_err(|e| error::Error::from_state(Box::new(e)))?
.map(StorageData))
}
/// Get the code at a given block.
pub fn code_at(&self, id: &BlockId<Block>) -> error::Result<Vec<u8>> {
Ok(self.storage(id, &StorageKey(well_known_keys::CODE.to_vec()))?
.expect("None is returned if there's no value stored for the given key; ':code' key is always defined; qed").0)
/// Get the set of authorities at a given block.
pub fn authorities_at(&self, id: &BlockId<Block>) -> error::Result<Vec<AuthorityIdFor<Block>>> {
match self.backend.blockchain().cache().and_then(|cache| cache.authorities_at(*id)) {
Some(cached_value) => Ok(cached_value),
None => self.executor.call(id, "Core_authorities", &[])
.and_then(|r| Vec::<AuthorityIdFor<Block>>::decode(&mut &r[..])
.ok_or_else(|| error::ErrorKind::InvalidAuthoritiesSet.into()))
}
}
/// Get the RuntimeVersion at a given block.
pub fn runtime_version_at(&self, id: &BlockId<Block>) -> error::Result<RuntimeVersion> {
self.executor.runtime_version(id)
}
/// Get call executor reference.
pub fn executor(&self) -> &E {
&self.executor
}
/// Reads storage value at a given block + key, returning read proof.
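///
/// A sketch of generating a proof, assuming `client` is a constructed `Client`;
/// verifying the proof against the header's state root happens on the reader's
/// side and is not shown:
///
/// ```ignore
/// let at = BlockId::Number(Zero::zero());
/// // A set of trie nodes sufficient to re-check the value of `:code` at `at`.
/// let proof: Vec<Vec<u8>> = client.read_proof(&at, well_known_keys::CODE)?;
/// ```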
pub fn read_proof(&self, id: &BlockId<Block>, key: &[u8]) -> error::Result<Vec<Vec<u8>>> {
self.state_at(id)
.and_then(|state| prove_read(state, key)
.map(|(_, proof)| proof)
.map_err(Into::into))
}
/// Execute a call to a contract on top of state in a block of given hash
/// AND returning execution proof.
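///
/// A sketch, assuming `client` is a constructed `Client`; `Core_version` is used
/// only as an example of a callable runtime method:
///
/// ```ignore
/// let at = BlockId::Number(Zero::zero());
/// // Returns both the call result and a proof a light client can replay.
/// let (result, proof) = client.execution_proof(&at, "Core_version", &[])?;
/// ```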
pub fn execution_proof(&self, id: &BlockId<Block>, method: &str, call_data: &[u8]) -> error::Result<(Vec<u8>, Vec<Vec<u8>>)> {
let state = self.state_at(id)?;
let header = self.prepare_environment_block(id)?;
prove_execution(state, header, &self.executor, method, call_data)
}
/// Reads given header and generates CHT-based header proof.
pub fn header_proof(&self, id: &BlockId<Block>) -> error::Result<(Block::Header, Vec<Vec<u8>>)> {
self.header_proof_with_cht_size(id, cht::SIZE)
}
/// Get block hash by number.
pub fn block_hash(&self, block_number: <<Block as BlockT>::Header as HeaderT>::Number) -> error::Result<Option<Block::Hash>> {
self.backend.blockchain().hash(block_number)
}
/// Reads given header and generates CHT-based header proof for CHT of given size.
pub fn header_proof_with_cht_size(&self, id: &BlockId<Block>, cht_size: u64) -> error::Result<(Block::Header, Vec<Vec<u8>>)> {
let proof_error = || error::ErrorKind::Backend(format!("Failed to generate header proof for {:?}", id));
let header = self.backend.blockchain().expect_header(*id)?;
let block_num = *header.number();
let cht_num = cht::block_to_cht_number(cht_size, block_num).ok_or_else(proof_error)?;
let cht_start = cht::start_number(cht_size, cht_num);
let headers = (cht_start.as_()..).map(|num| self.block_hash(As::sa(num)));
let proof = cht::build_proof::<Block::Header, Blake2Hasher, _, _>(cht_size, cht_num, ::std::iter::once(block_num), headers)?;
Ok((header, proof))
}
/// Get longest range within [first; last] that is possible to use in `key_changes`
/// and `key_changes_proof` calls.
/// Range could be shortened from the beginning if some changes tries have been pruned.
/// Returns `Ok(None)` if changes tries are not supported.
pub fn max_key_changes_range(
&self,
first: NumberFor<Block>,
last: BlockId<Block>,
) -> error::Result<Option<(NumberFor<Block>, BlockId<Block>)>> {
let (config, storage) = match self.require_changes_trie().ok() {
Some((config, storage)) => (config, storage),
None => return Ok(None),
};
let first = first.as_();
let last_num = self.backend.blockchain().expect_block_number_from_id(&last)?.as_();
if first > last_num {
return Err(error::ErrorKind::ChangesTrieAccessFailed("Invalid changes trie range".into()).into());
}
let finalized_number = self.backend.blockchain().info()?.finalized_number;
let oldest = storage.oldest_changes_trie_block(&config, finalized_number.as_());
let first = As::sa(::std::cmp::max(first, oldest));
Ok(Some((first, last)))
}
/// Get pairs of (block, extrinsic) where key has been changed at given blocks range.
/// Works only for runtimes that are supporting changes tries.
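///
/// A usage sketch, assuming `client` is a `Client` over a runtime with changes
/// tries enabled; the watched key is illustrative:
///
/// ```ignore
/// let key = StorageKey(well_known_keys::CODE.to_vec());
/// let best_hash = client.best_block_header()?.hash();
/// // Every (block number, extrinsic index) pair that changed `key` since block 1.
/// let changes = client.key_changes(As::sa(1), BlockId::Hash(best_hash), &key)?;
/// ```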
pub fn key_changes(
&self,
first: NumberFor<Block>,
last: BlockId<Block>,
key: &StorageKey
) -> error::Result<Vec<(NumberFor<Block>, u32)>> {
let (config, storage) = self.require_changes_trie()?;
let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?.as_();
let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?;
key_changes::<_, Blake2Hasher>(
&config,
&*storage,
first.as_(),
&ChangesTrieAnchorBlockId {
hash: convert_hash(&last_hash),
number: last_number,
},
self.backend.blockchain().info()?.best_number.as_(),
&key.0)
.and_then(|r| r.map(|r| r.map(|(block, tx)| (As::sa(block), tx))).collect::<Result<_, _>>())
.map_err(|err| error::ErrorKind::ChangesTrieAccessFailed(err).into())
}
/// Get proof for computation of (block, extrinsic) pairs where key has been changed at given blocks range.
/// `min` is the hash of the first block, whose changes trie root is known to the requester - when we're using
/// changes tries from ascendants of this block, we should provide proofs for changes tries roots.
/// `max` is the hash of the last block known to the requester - we can't use changes tries from descendants
/// of this block.
/// Works only for runtimes that are supporting changes tries.
pub fn key_changes_proof(
&self,
first: Block::Hash,
last: Block::Hash,
min: Block::Hash,
max: Block::Hash,
key: &StorageKey,
) -> error::Result<ChangesProof<Block::Header>> {
self.key_changes_proof_with_cht_size(
first,
last,
min,
max,
key,
cht::SIZE,
)
}
/// Does the same work as `key_changes_proof`, but assumes that CHTs are of passed size.
pub fn key_changes_proof_with_cht_size(
&self,
first: Block::Hash,
last: Block::Hash,
min: Block::Hash,
max: Block::Hash,
key: &StorageKey,
cht_size: u64,
) -> error::Result<ChangesProof<Block::Header>> {
struct AccessedRootsRecorder<'a, Block: BlockT> {
storage: &'a ChangesTrieStorage<Blake2Hasher>,
min: u64,
required_roots_proofs: Mutex<BTreeMap<NumberFor<Block>, H256>>,
};
impl<'a, Block: BlockT> ChangesTrieRootsStorage<Blake2Hasher> for AccessedRootsRecorder<'a, Block> {
fn root(&self, anchor: &ChangesTrieAnchorBlockId<H256>, block: u64) -> Result<Option<H256>, String> {
let root = self.storage.root(anchor, block)?;
if block < self.min {
if let Some(ref root) = root {
self.required_roots_proofs.lock().insert(
As::sa(block),
root.clone()
);
}
}
Ok(root)
}
}
impl<'a, Block: BlockT> ChangesTrieStorage<Blake2Hasher> for AccessedRootsRecorder<'a, Block> {
fn get(&self, key: &H256) -> Result<Option<DBValue>, String> {
self.storage.get(key)
}
}
let (config, storage) = self.require_changes_trie()?;
let min_number = self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(min))?;
let recording_storage = AccessedRootsRecorder::<Block> {
storage,
min: min_number.as_(),
required_roots_proofs: Mutex::new(BTreeMap::new()),
};
let max_number = ::std::cmp::min(
self.backend.blockchain().info()?.best_number,
self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(max))?,
);
// fetch key changes proof
let first_number = self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(first))?.as_();
let last_number = self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(last))?.as_();
let key_changes_proof = key_changes_proof::<_, Blake2Hasher>(
&config,
&recording_storage,
first_number,
&ChangesTrieAnchorBlockId {
hash: convert_hash(&last),
number: last_number,
},
max_number.as_(),
&key.0
)
.map_err(|err| error::Error::from(error::ErrorKind::ChangesTrieAccessFailed(err)))?;
// now gather proofs for all changes tries roots that were touched during key_changes_proof
// execution AND are unknown (i.e. replaced with CHT) to the requester
let roots = recording_storage.required_roots_proofs.into_inner();
let roots_proof = self.changes_trie_roots_proof(cht_size, roots.keys().cloned())?;
Ok(ChangesProof {
max_block: max_number,
proof: key_changes_proof,
roots: roots.into_iter().map(|(n, h)| (n, convert_hash(&h))).collect(),
roots_proof,
})
}
/// Generate CHT-based proof for roots of changes tries at given blocks.
fn changes_trie_roots_proof<I: IntoIterator<Item=NumberFor<Block>>>(
&self,
cht_size: u64,
blocks: I
) -> error::Result<Vec<Vec<u8>>> {
// most probably we have touched several changes tries that are parts of the single CHT
// => GroupBy changes tries by CHT number and then gather proof for the whole group at once
let mut proof = HashSet::new();
cht::for_each_cht_group::<Block::Header, _, _, _>(cht_size, blocks, |_, cht_num, cht_blocks| {
let cht_proof = self.changes_trie_roots_proof_at_cht(cht_size, cht_num, cht_blocks)?;
proof.extend(cht_proof);
Ok(())
}, ())?;
Ok(proof.into_iter().collect())
}
/// Generates CHT-based proof for roots of changes tries at given blocks (that are part of single CHT).
fn changes_trie_roots_proof_at_cht(
&self,
cht_size: u64,
cht_num: NumberFor<Block>,
blocks: Vec<NumberFor<Block>>
) -> error::Result<Vec<Vec<u8>>> {
let cht_start = cht::start_number(cht_size, cht_num);
let roots = (cht_start.as_()..).map(|num| self.header(&BlockId::Number(As::sa(num)))
.map(|block| block.and_then(|block| block.digest().log(DigestItem::as_changes_trie_root).cloned())));
let proof = cht::build_proof::<Block::Header, Blake2Hasher, _, _>(cht_size, cht_num, blocks, roots)?;
Ok(proof)
}
/// Returns changes trie configuration and storage or an error if it is not supported.
fn require_changes_trie(&self) -> error::Result<(ChangesTrieConfiguration, &B::ChangesTrieStorage)> {
let config = self.changes_trie_config()?;
let storage = self.backend.changes_trie_storage();
match (config, storage) {
(Some(config), Some(storage)) => Ok((config, storage)),
_ => Err(error::ErrorKind::ChangesTriesNotSupported.into()),
}
}
/// Create a new block, built on the head of the chain.
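///
/// A block-authoring sketch; `client` and `extrinsic` are assumed to exist, and the
/// runtime must expose the `BlockBuilder` API:
///
/// ```ignore
/// let mut builder = client.new_block()?;
/// // Queue an already-encoded extrinsic for inclusion.
/// builder.push(extrinsic)?;
/// // Seal the pending block; it still has to go through the normal import path.
/// let block = builder.bake()?;
/// ```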
pub fn new_block(
&self
) -> error::Result<block_builder::BlockBuilder<Block, Self>> where
E: Clone + Send + Sync,
RA: Send + Sync,
Self: ProvideRuntimeApi,
<Self as ProvideRuntimeApi>::Api: BlockBuilderAPI<Block>
{
block_builder::BlockBuilder::new(self)
}
/// Create a new block, built on top of `parent`.
pub fn new_block_at(
&self, parent: &BlockId<Block>
) -> error::Result<block_builder::BlockBuilder<Block, Self>> where
E: Clone + Send + Sync,
RA: Send + Sync,
Self: ProvideRuntimeApi,
<Self as ProvideRuntimeApi>::Api: BlockBuilderAPI<Block>
{
block_builder::BlockBuilder::at_block(parent, &self)
}
/// Lock the import lock, and run operations inside.
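///
/// A sketch of the intended usage, with `block_hash` assumed to be the hash of an
/// already-imported block:
///
/// ```ignore
/// client.lock_import_and_run(|operation| {
///     // Changes staged on `operation` are committed as one backend transaction
///     // when the closure returns `Ok`; notifications fire after the commit.
///     client.apply_finality(operation, BlockId::Hash(block_hash), None, true)
/// })?;
/// ```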
pub fn lock_import_and_run<R, F: FnOnce(&mut ClientImportOperation<Block, Blake2Hasher, B>) -> error::Result<R>>(
&self, f: F
) -> error::Result<R> {
let inner = || {
let _import_lock = self.import_lock.lock();
let mut op = ClientImportOperation {
op: self.backend.begin_operation()?,
notify_imported: None,
notify_finalized: Vec::new(),
};
let r = f(&mut op)?;
let ClientImportOperation { op, notify_imported, notify_finalized } = op;
self.backend.commit_operation(op)?;
self.notify_finalized(notify_finalized)?;
if let Some(notify_imported) = notify_imported {
self.notify_imported(notify_imported)?;
}
Ok(r)
};
let result = inner();
*self.importing_block.write() = None;
result
}
/// Apply a checked and validated block to an operation. If a justification is provided
/// then `finalized` *must* be true.
pub fn apply_block(
&self,
operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
import_block: ImportBlock<Block>,
new_authorities: Option<Vec<AuthorityIdFor<Block>>>,
) -> error::Result<ImportResult> where
E: CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone,
{
use runtime_primitives::traits::Digest;
let ImportBlock {
origin,
header,
justification,
post_digests,
body,
finalized,
auxiliary,
fork_choice,
} = import_block;
assert!(justification.is_some() && finalized || justification.is_none());
let parent_hash = header.parent_hash().clone();
match self.backend.blockchain().status(BlockId::Hash(parent_hash))? {
blockchain::BlockStatus::InChain => {},
blockchain::BlockStatus::Unknown => return Ok(ImportResult::UnknownParent),
}
let import_headers = if post_digests.is_empty() {
PrePostHeader::Same(header)
} else {
let mut post_header = header.clone();
for item in post_digests {
post_header.digest_mut().push(item);
}
PrePostHeader::Different(header, post_header)
};
let hash = import_headers.post().hash();
let height: u64 = import_headers.post().number().as_();
*self.importing_block.write() = Some(hash);
let result = self.execute_and_import_block(
operation,
origin,
hash,
import_headers,
justification,
body,
new_authorities,
finalized,
auxiliary,
fork_choice,
);
telemetry!("block.import";
"height" => height,
"best" => ?hash,
"origin" => ?origin
);
result
}
fn execute_and_import_block(
&self,
operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
origin: BlockOrigin,
hash: Block::Hash,
import_headers: PrePostHeader<Block::Header>,
justification: Option<Justification>,
body: Option<Vec<Block::Extrinsic>>,
authorities: Option<Vec<AuthorityIdFor<Block>>>,
finalized: bool,
aux: Vec<(Vec<u8>, Option<Vec<u8>>)>,
fork_choice: ForkChoiceStrategy,
) -> error::Result<ImportResult> where
E: CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone,
{
let parent_hash = import_headers.post().parent_hash().clone();
match self.backend.blockchain().status(BlockId::Hash(hash))? {
blockchain::BlockStatus::InChain => return Ok(ImportResult::AlreadyInChain),
blockchain::BlockStatus::Unknown => {},
}
let (last_best, last_best_number) = {
let info = self.backend.blockchain().info()?;
(info.best_hash, info.best_number)
};
// this is a fairly arbitrary choice of where to draw the line on making notifications,
// but the general goal is to only make notifications when we are already fully synced
// and get a new chain head.
let make_notifications = match origin {
BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => true,
BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false,
};
self.backend.begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?;
// ensure parent block is finalized to maintain invariant that
// finality is called sequentially.
if finalized {
self.apply_finality_with_block_hash(operation, parent_hash, None, last_best, make_notifications)?;
}
// FIXME #1232: correct path logic for when to execute this function
let (storage_update, changes_update, storage_changes) = self.block_execution(&operation.op, &import_headers, origin, hash, body.clone())?;
let is_new_best = finalized || match fork_choice {
ForkChoiceStrategy::LongestChain => import_headers.post().number() > &last_best_number,
ForkChoiceStrategy::Custom(v) => v,
};
let leaf_state = if finalized {
crate::backend::NewBlockState::Final
} else if is_new_best {
crate::backend::NewBlockState::Best
} else {
crate::backend::NewBlockState::Normal
};
trace!("Imported {}, (#{}), best={}, origin={:?}", hash, import_headers.post().number(), is_new_best, origin);
operation.op.set_block_data(
import_headers.post().clone(),
body,
justification,
leaf_state,
)?;
if let Some(authorities) = authorities {
operation.op.update_authorities(authorities);
}
if let Some(storage_update) = storage_update {
operation.op.update_db_storage(storage_update)?;
}
if let Some(storage_changes) = storage_changes.clone() {
operation.op.update_storage(storage_changes)?;
}
if let Some(Some(changes_update)) = changes_update {
operation.op.update_changes_trie(changes_update)?;
}
operation.op.insert_aux(aux)?;
if make_notifications {
if finalized {
operation.notify_finalized.push(hash);
}
operation.notify_imported = Some((hash, origin, import_headers.into_post(), is_new_best, storage_changes));
}
Ok(ImportResult::Queued)
}
fn block_execution(
&self,
transaction: &B::BlockImportOperation,
import_headers: &PrePostHeader<Block::Header>,
origin: BlockOrigin,
hash: Block::Hash,
body: Option<Vec<Block::Extrinsic>>,
) -> error::Result<(
Option<StorageUpdate<B, Block>>,
Option<Option<ChangesUpdate>>,
Option<Vec<(Vec<u8>, Option<Vec<u8>>)>>,
)>
where
E: CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone,
{
match transaction.state()? {
Some(transaction_state) => {
let mut overlay = Default::default();
let (_, storage_update, changes_update) = self.executor.call_at_state::<_, _, NeverNativeValue, fn() -> _>(
transaction_state,
&mut overlay,
"Core_execute_block",
&<Block as BlockT>::new(import_headers.pre().clone(), body.unwrap_or_default()).encode(),
match (origin, self.block_execution_strategy) {
(BlockOrigin::NetworkInitialSync, _) | (_, ExecutionStrategy::NativeWhenPossible) =>
ExecutionManager::NativeWhenPossible,
(_, ExecutionStrategy::AlwaysWasm) => ExecutionManager::AlwaysWasm,
_ => ExecutionManager::Both(|wasm_result, native_result| {
let header = import_headers.post();
warn!("Consensus error between wasm and native block execution at block {}", hash);
warn!(" Header {:?}", header);
warn!(" Native result {:?}", native_result);
warn!(" Wasm result {:?}", wasm_result);
telemetry!("block.execute.consensus_failure";
"hash" => ?hash,
"origin" => ?origin,
"header" => ?header
);
wasm_result
}),
},
None,
)?;
overlay.commit_prospective();
Ok((Some(storage_update), Some(changes_update), Some(overlay.into_committed().collect())))
},
None => Ok((None, None, None))
}
}
fn apply_finality_with_block_hash(
&self,
operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
block: Block::Hash,
justification: Option<Justification>,
best_block: Block::Hash,
notify: bool,
) -> error::Result<()> {
// find tree route from last finalized to given block.
let last_finalized = self.backend.blockchain().last_finalized()?;
if block == last_finalized {
warn!("Possible safety violation: attempted to re-finalize last finalized block {:?} ", last_finalized);
return Ok(());
}
let route_from_finalized = crate::blockchain::tree_route(
self.backend.blockchain(),
BlockId::Hash(last_finalized),
BlockId::Hash(block),
)?;
if let Some(retracted) = route_from_finalized.retracted().get(0) {
warn!("Safety violation: attempted to revert finalized block {:?} which is not in the \
same chain as last finalized {:?}", retracted, last_finalized);
bail!(error::ErrorKind::NotInFinalizedChain);
}
let route_from_best = crate::blockchain::tree_route(
self.backend.blockchain(),
BlockId::Hash(best_block),
BlockId::Hash(block),
)?;
// if the block is not a direct ancestor of the current best chain,
// then some other block is the common ancestor.
if route_from_best.common_block().hash != block {
// FIXME: #1442 reorganize best block to be the best chain containing `block`.
}
let enacted = route_from_finalized.enacted();
assert!(enacted.len() > 0);
for finalize_new in &enacted[..enacted.len() - 1] {
operation.op.mark_finalized(BlockId::Hash(finalize_new.hash), None)?;
}
assert_eq!(enacted.last().map(|e| e.hash), Some(block));
operation.op.mark_finalized(BlockId::Hash(block), justification)?;
if notify {
// sometimes when syncing, tons of blocks can be finalized at once.
// we'll send notifications spuriously in that case.
const MAX_TO_NOTIFY: usize = 256;
let enacted = route_from_finalized.enacted();
let start = enacted.len() - ::std::cmp::min(enacted.len(), MAX_TO_NOTIFY);
for finalized in &enacted[start..] {
operation.notify_finalized.push(finalized.hash);
}
}
Ok(())
}
fn notify_finalized(
&self,
notify_finalized: Vec<Block::Hash>,
) -> error::Result<()> {
let mut sinks = self.finality_notification_sinks.lock();
for finalized_hash in notify_finalized {
let header = self.header(&BlockId::Hash(finalized_hash))?
.expect("header already known to exist in DB because it is indicated in the tree route; qed");
let notification = FinalityNotification {
header,
hash: finalized_hash,
};
sinks.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
}
Ok(())
}
fn notify_imported(
&self,
notify_import: (Block::Hash, BlockOrigin, Block::Header, bool, Option<Vec<(Vec<u8>, Option<Vec<u8>>)>>),
) -> error::Result<()> {
let (hash, origin, header, is_new_best, storage_changes) = notify_import;
if let Some(storage_changes) = storage_changes {
// TODO [ToDr] How to handle re-orgs? Should we re-emit all storage changes?
self.storage_notifications.lock()
.trigger(&hash, storage_changes.into_iter());
}
let notification = BlockImportNotification::<Block> {
hash,
origin,
header,
is_new_best,
};
self.import_notification_sinks.lock()
.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
Ok(())
}
/// Apply auxiliary data insertion into an operation.
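///
/// A sketch of inserting and deleting auxiliary entries inside an import operation;
/// the key and value bytes are illustrative:
///
/// ```ignore
/// client.lock_import_and_run(|operation| {
///     let insert = [(&b"engine/round"[..], &b"42"[..])];
///     let delete: [&[u8]; 0] = [];
///     // Both lists are applied in the same backend transaction as the rest of
///     // the operation.
///     client.apply_aux(operation, insert.iter(), delete.iter())
/// })?;
/// ```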
pub fn apply_aux<
'a,
'b: 'a,
'c: 'a,
I: IntoIterator<Item=&'a(&'c [u8], &'c [u8])>,
D: IntoIterator<Item=&'a &'b [u8]>,
>(
&self,
operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
insert: I,
delete: D
) -> error::Result<()> {
operation.op.insert_aux(
insert.into_iter()
.map(|(k, v)| (k.to_vec(), Some(v.to_vec())))
.chain(delete.into_iter().map(|k| (k.to_vec(), None)))
)
}
/// Mark all blocks up to given as finalized in operation. If a
/// justification is provided it is stored with the given finalized
/// block (any other finalized blocks are left unjustified).
pub fn apply_finality(
&self,
operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
id: BlockId<Block>,
justification: Option<Justification>,
notify: bool,
) -> error::Result<()> {
let last_best = self.backend.blockchain().info()?.best_hash;
let to_finalize_hash = self.backend.blockchain().expect_block_hash_from_id(&id)?;
self.apply_finality_with_block_hash(operation, to_finalize_hash, justification, last_best, notify)
}
/// Finalize a block. This will implicitly finalize all blocks up to it and
/// fire finality notifications.
///
/// Pass a flag to indicate whether finality notifications should be propagated.
/// This is usually tied to some synchronization state, where we don't send notifications
/// while performing major synchronization work.
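///
/// A sketch, assuming `hash` identifies an already-imported block and `justification`
/// holds whatever bytes the consensus engine produced for it:
///
/// ```ignore
/// // Finalizes every ancestor up to `hash`, stores the justification with it,
/// // and emits finality notifications.
/// client.finalize_block(BlockId::Hash(hash), Some(justification), true)?;
/// ```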
pub fn finalize_block(&self, id: BlockId<Block>, justification: Option<Justification>, notify: bool) -> error::Result<()> {
self.lock_import_and_run(|operation| {
let last_best = self.backend.blockchain().info()?.best_hash;
let to_finalize_hash = self.backend.blockchain().expect_block_hash_from_id(&id)?;
self.apply_finality_with_block_hash(operation, to_finalize_hash, justification, last_best, notify)
})
}
/// Attempts to revert the chain by `n` blocks. Returns the number of blocks that were
/// successfully reverted.