// Copyright 2017-2018 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Substrate Client
use std::{marker::PhantomData, collections::{HashSet, BTreeMap}, sync::Arc};
use error::Error;
use futures::sync::mpsc;
use parking_lot::{Mutex, RwLock};
use primitives::AuthorityId;
use runtime_primitives::{
Justification, generic::{BlockId, SignedBlock},
transaction_validity::{TransactionValidity, TransactionTag},
};
use consensus::{ImportBlock, ImportResult, BlockOrigin};
use runtime_primitives::traits::{
Block as BlockT, Header as HeaderT, Zero, As, NumberFor, CurrentHeight, BlockNumberToHash,
ApiRef, ProvideRuntimeApi, Digest, DigestItem,
};
use runtime_primitives::BuildStorage;
use runtime_api::{Core as CoreAPI, CallApiAt, TaggedTransactionQueue, ConstructRuntimeApi};
use primitives::{Blake2Hasher, H256, ChangesTrieConfiguration, convert_hash};
use primitives::storage::{StorageKey, StorageData};
use primitives::storage::well_known_keys;
use codec::Decode;
use state_machine::{
DBValue, Backend as StateBackend, CodeExecutor, ChangesTrieAnchorBlockId,
ExecutionStrategy, ExecutionManager, prove_read,
ChangesTrieRootsStorage, ChangesTrieStorage,
key_changes, key_changes_proof, OverlayedChanges,
};
use codec::Encode;
use blockchain::{self, Info as ChainInfo, Backend as ChainBackend, HeaderBackend as ChainHeaderBackend};
use call_executor::{CallExecutor, LocalCallExecutor};
use notifications::{StorageNotifications, StorageEventStream};
use light::fetcher::ChangesProof;
use {cht, error, in_mem, block_builder::{self, api::BlockBuilder as BlockBuilderAPI}, genesis, consensus};
/// Type that implements `futures::Stream` of block import events.
pub type ImportNotifications<Block> = mpsc::UnboundedReceiver<BlockImportNotification<Block>>;
/// A stream of block finality notifications.
pub type FinalityNotifications<Block> = mpsc::UnboundedReceiver<FinalityNotification<Block>>;
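// Illustrative sketch (not part of the original source): both streams are plain `futures` 0.1
// unbounded receivers, so a consumer polls them like any other stream. `client` below is
// assumed to be a value implementing `BlockchainEvents<Block>`.
//
//     use futures::Stream;
//
//     let imports = client.import_notification_stream();
//     let watcher = imports.for_each(|notification| {
//         println!("imported {:?}, new best: {}", notification.hash, notification.is_new_best);
//         Ok(())
//     });
//     // `watcher` is then spawned onto the node's event loop.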
/// Substrate client.
pub struct Client<B, E, Block, RA> where Block: BlockT {
backend: Arc<B>,
executor: E,
storage_notifications: Mutex<StorageNotifications<Block>>,
import_notification_sinks: Mutex<Vec<mpsc::UnboundedSender<BlockImportNotification<Block>>>>,
finality_notification_sinks: Mutex<Vec<mpsc::UnboundedSender<FinalityNotification<Block>>>>,
import_lock: Mutex<()>,
importing_block: RwLock<Option<Block::Hash>>, // holds the block hash currently being imported. TODO: replace this with block queue
block_execution_strategy: ExecutionStrategy,
api_execution_strategy: ExecutionStrategy,
_phantom: PhantomData<RA>,
}

/// A source of blockchain events.
pub trait BlockchainEvents<Block: BlockT> {
/// Get block import event stream. Not guaranteed to be fired for every
/// imported block.
fn import_notification_stream(&self) -> ImportNotifications<Block>;
/// Get a stream of finality notifications. Not guaranteed to be fired for every
/// finalized block.
fn finality_notification_stream(&self) -> FinalityNotifications<Block>;
/// Get storage changes event stream.
///
/// Passing `None` as `filter_keys` subscribes to all storage changes.
fn storage_changes_notification_stream(&self, filter_keys: Option<&[StorageKey]>) -> error::Result<StorageEventStream<Block::Hash>>;
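// A hedged usage sketch (not from the original source): subscribing only to changes of the
// well-known runtime code key. Passing `None` instead delivers every storage change.
//
//     let code_key = StorageKey(well_known_keys::CODE.to_vec());
//     let changes = client.storage_changes_notification_stream(Some(&[code_key]))?;
//     // each item on `changes` carries the importing block's hash together with the
//     // changed (key, value) pairs among the watched keys.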
}

/// Chain head information.
pub trait ChainHead<Block: BlockT> {
/// Get best block header.
fn best_block_header(&self) -> Result<<Block as BlockT>::Header, error::Error>;
/// Get all leaves of the chain: block hashes that have no children currently.
/// Leaves that can never be finalized will not be returned.
fn leaves(&self) -> Result<Vec<<Block as BlockT>::Hash>, error::Error>;
}

/// Fetch block body by ID.
pub trait BlockBody<Block: BlockT> {
/// Get block body by ID. Returns `None` if the body is not stored.
fn block_body(&self, id: &BlockId<Block>) -> error::Result<Option<Vec<<Block as BlockT>::Extrinsic>>>;
}
/// Client info
// TODO: split queue info from chain info and amalgamate into single struct.
#[derive(Debug)]
pub struct ClientInfo<Block: BlockT> {
/// Information about the chain as reported by the backend.
pub chain: ChainInfo<Block>,
/// Best queued block hash, if any.
pub best_queued_hash: Option<Block::Hash>,
/// Best queued block number, if any.
pub best_queued_number: Option<<<Block as BlockT>::Header as HeaderT>::Number>,
}
/// Block status.
#[derive(Debug, PartialEq, Eq)]
pub enum BlockStatus {
/// Added to the import queue.
Queued,
/// Already in the blockchain.
InChain,
/// Block or parent is known to be bad.
KnownBad,
/// Not in the queue or the blockchain.
Unknown,
}
/// Summary of an imported block
#[derive(Clone, Debug)]
pub struct BlockImportNotification<Block: BlockT> {
/// Imported block header hash.
pub hash: Block::Hash,
/// Imported block origin.
pub origin: BlockOrigin,
/// Imported block header.
pub header: Block::Header,
/// Is this the new best block.
pub is_new_best: bool,
/// Tags provided by transactions imported in that block.
pub tags: Vec<TransactionTag>,
}

/// Summary of a finalized block.
#[derive(Clone, Debug)]
pub struct FinalityNotification<Block: BlockT> {
/// Imported block header hash.
pub hash: Block::Hash,
/// Imported block header.
pub header: Block::Header,
}
// used in importing a block, where additional changes are made after the runtime
// executed.
enum PrePostHeader<H> {
// they are the same: no post-runtime digest items.
Same(H),
// different headers (pre, post).
Different(H, H),
}
impl<H> PrePostHeader<H> {
// get a reference to the "pre-header" -- the header as it should be just after the runtime.
fn pre(&self) -> &H {
match *self {
PrePostHeader::Same(ref h) => h,
PrePostHeader::Different(ref h, _) => h,
}
}
// get a reference to the "post-header" -- the header as it should be after all changes are applied.
fn post(&self) -> &H {
match *self {
PrePostHeader::Same(ref h) => h,
PrePostHeader::Different(_, ref h) => h,
}
}
// convert to the "post-header" -- the header as it should be after all changes are applied.
fn into_post(self) -> H {
match self {
PrePostHeader::Same(h) => h,
PrePostHeader::Different(_, h) => h,
}
}
}
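// Illustrative sketch (not part of the original source): when block import adds digest items
// after the runtime has executed (e.g. a seal), the two headers differ; otherwise they are the
// same value. `pre` and `post` below are hypothetical locals.
//
//     let import_headers = if pre == post {
//         PrePostHeader::Same(post)
//     } else {
//         PrePostHeader::Different(pre, post)
//     };
//     // the runtime executed against `import_headers.pre()`; `import_headers.post()` is
//     // what gets stored and announced.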
/// Create a client backed by the in-memory backend. Useful for tests.
pub fn new_in_mem<E, Block, S, RA>(
executor: E,
genesis_storage: S,
) -> error::Result<Client<in_mem::Backend<Block, Blake2Hasher>, LocalCallExecutor<in_mem::Backend<Block, Blake2Hasher>, E>, Block, RA>>
where
E: CodeExecutor<Blake2Hasher> + RuntimeInfo,
S: BuildStorage,
Block: BlockT<Hash=H256>,
{
new_with_backend(Arc::new(in_mem::Backend::new()), executor, genesis_storage)
}
/// Create a client with an explicitly provided backend.
/// This is useful for testing backend implementations.
pub fn new_with_backend<B, E, Block, S, RA>(
backend: Arc<B>,
executor: E,
build_genesis_storage: S,
) -> error::Result<Client<B, LocalCallExecutor<B, E>, Block, RA>>
where
E: CodeExecutor<Blake2Hasher> + RuntimeInfo,
S: BuildStorage,
Block: BlockT<Hash=H256>,
B: backend::LocalBackend<Block, Blake2Hasher>
{
let call_executor = LocalCallExecutor::new(backend.clone(), executor);
Client::new(backend, call_executor, build_genesis_storage, ExecutionStrategy::NativeWhenPossible, ExecutionStrategy::NativeWhenPossible)
}

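// Hedged construction sketch (the executor and genesis-config names below are assumptions,
// not part of this file): building a throwaway client over the in-memory backend.
//
//     let client = new_in_mem::<_, Block, _, RuntimeApi>(
//         executor,          // anything implementing `CodeExecutor + RuntimeInfo`
//         genesis_config,    // anything implementing `BuildStorage`
//     )?;
//     assert_eq!(client.info()?.chain.best_number, As::sa(0));
//
// `new_with_backend` does the same against an explicitly supplied backend and defaults both
// execution strategies to `NativeWhenPossible`.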
impl<B, E, Block, RA> Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
/// Creates new Substrate Client with given blockchain and code executor.
pub fn new<S: BuildStorage>(
backend: Arc<B>,
executor: E,
build_genesis_storage: S,
block_execution_strategy: ExecutionStrategy,
api_execution_strategy: ExecutionStrategy,
) -> error::Result<Self> {
if backend.blockchain().header(BlockId::Number(Zero::zero()))?.is_none() {
let (genesis_storage, children_genesis_storage) = build_genesis_storage.build_storage()?;
let mut op = backend.begin_operation(BlockId::Hash(Default::default()))?;
let state_root = op.reset_storage(genesis_storage, children_genesis_storage)?;
let genesis_block = genesis::construct_genesis_block::<Block>(state_root.into());
info!("Initialising Genesis block/state (state: {}, header-hash: {})", genesis_block.header().state_root(), genesis_block.header().hash());
op.set_block_data(
genesis_block.deconstruct().0,
Some(vec![]),
None,
::backend::NewBlockState::Final
)?;
backend.commit_operation(op)?;
}
Ok(Client {
backend,
executor,
storage_notifications: Default::default(),
import_notification_sinks: Default::default(),
finality_notification_sinks: Default::default(),
import_lock: Default::default(),
importing_block: Default::default(),
block_execution_strategy,
api_execution_strategy,
_phantom: Default::default(),
})
}
/// Get a reference to the state at a given block.
pub fn state_at(&self, block: &BlockId<Block>) -> error::Result<B::State> {
self.backend.state_at(*block)
}
/// Expose backend reference. To be used in tests only
pub fn backend(&self) -> &Arc<B> {
&self.backend
}
/// Return a single storage entry under the given key, from the state at a block of the given hash.
pub fn storage(&self, id: &BlockId<Block>, key: &StorageKey) -> error::Result<Option<StorageData>> {
Ok(self.state_at(id)?
.storage(&key.0).map_err(|e| error::Error::from_state(Box::new(e)))?
.map(StorageData))
}

/// Get the code at a given block.
pub fn code_at(&self, id: &BlockId<Block>) -> error::Result<Vec<u8>> {
Ok(self.storage(id, &StorageKey(well_known_keys::CODE.to_vec()))?
.expect("None is returned if there's no value stored for the given key; ':code' key is always defined; qed").0)
}
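// Usage sketch (assumed, not from the source): reading a single storage entry and the runtime
// code at the current best block. `example_key` is a hypothetical storage key.
//
//     let best = BlockId::Hash(client.info()?.chain.best_hash);
//     let example_key = StorageKey(b"example".to_vec());
//     let value: Option<StorageData> = client.storage(&best, &example_key)?;
//     let runtime_code: Vec<u8> = client.code_at(&best)?;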
/// Get the set of authorities at a given block.
pub fn authorities_at(&self, id: &BlockId<Block>) -> error::Result<Vec<AuthorityId>> {
match self.backend.blockchain().cache().and_then(|cache| cache.authorities_at(*id)) {
Some(cached_value) => Ok(cached_value),
None => self.executor.call(id, "authorities", &[])
.and_then(|r| Vec::<AuthorityId>::decode(&mut &r.return_data[..])
.ok_or(error::ErrorKind::InvalidAuthoritiesSet.into()))
}
}

/// Get the RuntimeVersion at a given block.
pub fn runtime_version_at(&self, id: &BlockId<Block>) -> error::Result<RuntimeVersion> {
// TODO: Post Poc-2 return an error if version is missing
self.executor.runtime_version(id)
}

/// Get call executor reference.
pub fn executor(&self) -> &E {
&self.executor
}

/// Reads storage value at a given block + key, returning read proof.
pub fn read_proof(&self, id: &BlockId<Block>, key: &[u8]) -> error::Result<Vec<Vec<u8>>> {
self.state_at(id)
.and_then(|state| prove_read(state, key)
.map(|(_, proof)| proof)
.map_err(Into::into))
}
/// Execute a call to a given method on top of the state at a block of the given hash
/// AND return an execution proof.
pub fn execution_proof(&self, id: &BlockId<Block>, method: &str, call_data: &[u8]) -> error::Result<(Vec<u8>, Vec<Vec<u8>>)> {
self.state_at(id).and_then(|state| self.executor.prove_at_state(state, &mut Default::default(), method, call_data))
}

/// Reads given header and generates CHT-based header proof.
pub fn header_proof(&self, id: &BlockId<Block>) -> error::Result<(Block::Header, Vec<Vec<u8>>)> {
self.header_proof_with_cht_size(id, cht::SIZE)
}
pub(crate) fn call_at_state(
&self,
at: &BlockId<Block>,
function: &'static str,
args: Vec<u8>,
changes: &mut OverlayedChanges
) -> error::Result<Vec<u8>> {
let state = self.state_at(at)?;
let execution_manager = || match self.api_execution_strategy {
ExecutionStrategy::NativeWhenPossible => ExecutionManager::NativeWhenPossible,
ExecutionStrategy::AlwaysWasm => ExecutionManager::AlwaysWasm,
ExecutionStrategy::Both => ExecutionManager::Both(|wasm_result, native_result| {
warn!("Consensus error between wasm and native runtime execution at block {:?}", at);
warn!(" Function {:?}", function);
warn!(" Native result {:?}", native_result);
warn!(" Wasm result {:?}", wasm_result);
wasm_result
}),
};
self.executor.call_at_state(&state, changes, function, &args, execution_manager())
.map(|res| res.0)
}
/// Get block hash by number.
pub fn block_hash(&self, block_number: <<Block as BlockT>::Header as HeaderT>::Number) -> error::Result<Option<Block::Hash>> {
self.backend.blockchain().hash(block_number)
}
/// Reads given header and generates CHT-based header proof for CHT of given size.
pub fn header_proof_with_cht_size(&self, id: &BlockId<Block>, cht_size: u64) -> error::Result<(Block::Header, Vec<Vec<u8>>)> {
let proof_error = || error::ErrorKind::Backend(format!("Failed to generate header proof for {:?}", id));
let header = self.header(id)?.ok_or_else(|| error::ErrorKind::UnknownBlock(format!("{:?}", id)))?;
let block_num = *header.number();
let cht_num = cht::block_to_cht_number(cht_size, block_num).ok_or_else(proof_error)?;
let cht_start = cht::start_number(cht_size, cht_num);
let headers = (cht_start.as_()..).map(|num| self.block_hash(As::sa(num)));
let proof = cht::build_proof::<Block::Header, Blake2Hasher, _, _>(cht_size, cht_num, ::std::iter::once(block_num), headers)?;
Ok((header, proof))
}
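// Note (hedged, not from the original source): the proof built above is a trie proof for the
// CHT that covers the requested block number, so a light client holding the matching CHT root
// can check the returned header against it without storing full headers, e.g.:
//
//     let (header, proof) = client.header_proof(&BlockId::Number(number))?;
//     // the light-client side verification helpers live in the `cht` module.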
/// Get pairs of (block, extrinsic) where key has been changed at given blocks range.
/// Works only for runtimes that support changes tries.
pub fn key_changes(
&self,
first: Block::Hash,
last: Block::Hash,
key: &[u8]
) -> error::Result<Vec<(NumberFor<Block>, u32)>> {
let config = self.changes_trie_config()?;
let storage = self.backend.changes_trie_storage();
let (config, storage) = match (config, storage) {
(Some(config), Some(storage)) => (config, storage),
_ => return Err(error::ErrorKind::ChangesTriesNotSupported.into()),
};
key_changes::<_, Blake2Hasher>(
storage,
self.require_block_number_from_id(&BlockId::Hash(first))?.as_(),
&ChangesTrieAnchorBlockId {
hash: convert_hash(&last),
number: self.require_block_number_from_id(&BlockId::Hash(last))?.as_(),
},
self.backend.blockchain().info()?.best_number.as_(),
key)
.map_err(|err| error::ErrorKind::ChangesTrieAccessFailed(err).into())
.map(|r| r.into_iter().map(|(b, e)| (As::sa(b), e)).collect())
}
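// Hedged usage sketch (not from the source): list the (block number, extrinsic index) pairs
// that changed the `:code` key over a hash range. If the runtime has no changes tries
// configured, this returns `ChangesTriesNotSupported`.
//
//     let changes = client.key_changes(first_hash, last_hash, well_known_keys::CODE)?;
//     for (number, extrinsic_index) in changes {
//         println!("`:code` changed in block #{} by extrinsic {}", number, extrinsic_index);
//     }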
/// Get proof for computation of (block, extrinsic) pairs where key has been changed at given blocks range.
/// `min` is the hash of the first block whose changes trie root is known to the requester - when we're using
/// changes tries from ancestors of this block, we should also provide proofs for their changes trie roots.
/// `max` is the hash of the last block known to the requester - we can't use changes tries from descendants
/// of this block.
/// Works only for runtimes that support changes tries.
pub fn key_changes_proof(
&self,
first: Block::Hash,
last: Block::Hash,
min: Block::Hash,
max: Block::Hash,
key: &[u8]
) -> error::Result<ChangesProof<Block::Header>> {
self.key_changes_proof_with_cht_size(
first,
last,
min,
max,
key,
cht::SIZE,
)
}
/// Does the same work as `key_changes_proof`, but assumes that CHTs are of passed size.
pub fn key_changes_proof_with_cht_size(
&self,
first: Block::Hash,
last: Block::Hash,
min: Block::Hash,
max: Block::Hash,
key: &[u8],
cht_size: u64,
) -> error::Result<ChangesProof<Block::Header>> {
struct AccessedRootsRecorder<'a, Block: BlockT> {
storage: &'a ChangesTrieStorage<Blake2Hasher>,
min: u64,
required_roots_proofs: Mutex<BTreeMap<NumberFor<Block>, H256>>,
};
impl<'a, Block: BlockT> ChangesTrieRootsStorage<Blake2Hasher> for AccessedRootsRecorder<'a, Block> {
fn root(&self, anchor: &ChangesTrieAnchorBlockId<H256>, block: u64) -> Result<Option<H256>, String> {
let root = self.storage.root(anchor, block)?;
if block < self.min {
if let Some(ref root) = root {
self.required_roots_proofs.lock().insert(
As::sa(block),
root.clone()
);
}
}
Ok(root)
}
}
impl<'a, Block: BlockT> ChangesTrieStorage<Blake2Hasher> for AccessedRootsRecorder<'a, Block> {
fn get(&self, key: &H256) -> Result<Option<DBValue>, String> {
self.storage.get(key)
}
}
let config = self.changes_trie_config()?;
let storage = self.backend.changes_trie_storage();
let (config, storage) = match (config, storage) {
(Some(config), Some(storage)) => (config, storage),
_ => return Err(error::ErrorKind::ChangesTriesNotSupported.into()),
};
let min_number = self.require_block_number_from_id(&BlockId::Hash(min))?;
let recording_storage = AccessedRootsRecorder::<Block> {
storage,
min: min_number.as_(),
required_roots_proofs: Mutex::new(BTreeMap::new()),
};
let max_number = ::std::cmp::min(
self.backend.blockchain().info()?.best_number,
self.require_block_number_from_id(&BlockId::Hash(max))?,
);
// fetch key changes proof
let key_changes_proof = key_changes_proof::<_, Blake2Hasher>(
&recording_storage,
self.require_block_number_from_id(&BlockId::Hash(first))?.as_(),
&ChangesTrieAnchorBlockId {
hash: convert_hash(&last),
number: self.require_block_number_from_id(&BlockId::Hash(last))?.as_(),
},
max_number.as_(),
key
)
.map_err(|err| error::Error::from(error::ErrorKind::ChangesTrieAccessFailed(err)))?;
// now gather proofs for all changes tries roots that were touched during key_changes_proof
// execution AND are unknown (i.e. replaced with CHT) to the requester
let roots = recording_storage.required_roots_proofs.into_inner();
let roots_proof = self.changes_trie_roots_proof(cht_size, roots.keys().cloned())?;
Ok(ChangesProof {
max_block: max_number,
proof: key_changes_proof,
roots: roots.into_iter().map(|(n, h)| (n, convert_hash(&h))).collect(),
roots_proof,
})
}
/// Generate CHT-based proof for roots of changes tries at given blocks.
fn changes_trie_roots_proof<I: IntoIterator<Item=NumberFor<Block>>>(
&self,
cht_size: u64,
blocks: I
) -> error::Result<Vec<Vec<u8>>> {
// most probably we have touched several changes tries that are parts of the single CHT
// => GroupBy changes tries by CHT number and then gather proof for the whole group at once
let mut proof = HashSet::new();
cht::for_each_cht_group::<Block::Header, _, _, _>(cht_size, blocks, |_, cht_num, cht_blocks| {
let cht_proof = self.changes_trie_roots_proof_at_cht(cht_size, cht_num, cht_blocks)?;
proof.extend(cht_proof);
Ok(())
}, ())?;
Ok(proof.into_iter().collect())
}
/// Generates CHT-based proof for roots of changes tries at given blocks (that are part of single CHT).
fn changes_trie_roots_proof_at_cht(
&self,
cht_size: u64,
cht_num: NumberFor<Block>,
blocks: Vec<NumberFor<Block>>
) -> error::Result<Vec<Vec<u8>>> {
let cht_start = cht::start_number(cht_size, cht_num);
let roots = (cht_start.as_()..).map(|num| self.header(&BlockId::Number(As::sa(num)))
.map(|block| block.and_then(|block| block.digest().log(DigestItem::as_changes_trie_root).cloned())));
let proof = cht::build_proof::<Block::Header, Blake2Hasher, _, _>(cht_size, cht_num, blocks, roots)?;
Ok(proof)
}

/// Create a new block, built on the head of the chain.
pub fn new_block(
&self
) -> error::Result<block_builder::BlockBuilder<Block, Self>> where
E: Clone + Send + Sync,
RA: BlockBuilderAPI<Block>
{
block_builder::BlockBuilder::new(self)
}
/// Create a new block, built on top of `parent`.
pub fn new_block_at(
&self, parent: &BlockId<Block>
) -> error::Result<block_builder::BlockBuilder<Block, Self>> where
E: Clone + Send + Sync,
RA: BlockBuilderAPI<Block>
{
block_builder::BlockBuilder::at_block(parent, &self)
}
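// Hedged sketch of the builder flow (the extrinsic value and import plumbing are assumptions,
// not part of this file): build a block on the current head, executing each extrinsic as it
// is pushed.
//
//     let mut builder = client.new_block()?;
//     builder.push(extrinsic)?;      // apply and record one extrinsic; repeat as needed
//     let block = builder.bake()?;   // finish building and return the complete block
//     // the baked block is then wrapped into an `ImportBlock` and sent through block import.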
// TODO [ToDr] Optimize and re-use tags from the pool.
fn transaction_tags(
&self,
at: Block::Hash,
body: &Option<Vec<Block::Extrinsic>>
) -> error::Result<Vec<TransactionTag>> where
RA: TaggedTransactionQueue<Block>,
E: CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone,
{
let id = BlockId::Hash(at);
Ok(match body {
None => vec![],
Some(ref extrinsics) => {
let mut tags = vec![];
for tx in extrinsics {
let validity = self.runtime_api().validate_transaction(&id, &tx)?;
match validity {
TransactionValidity::Valid { mut provides, .. } => {
tags.append(&mut provides);
},
// silently ignore invalid extrinsics,
// because they might just be inherents
_ => {}
}
}
tags
},
})
}
fn execute_and_import_block(
&self,
origin: BlockOrigin,
hash: Block::Hash,
import_headers: PrePostHeader<Block::Header>,
justification: Justification,
body: Option<Vec<Block::Extrinsic>>,
authorities: Option<Vec<AuthorityId>>,
finalized: bool,
aux: Vec<(Vec<u8>, Option<Vec<u8>>)>,
) -> error::Result<ImportResult> where
RA: TaggedTransactionQueue<Block>,
E: CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone,
{
let parent_hash = import_headers.post().parent_hash().clone();
match self.backend.blockchain().status(BlockId::Hash(hash))? {
blockchain::BlockStatus::InChain => return Ok(ImportResult::AlreadyInChain),
blockchain::BlockStatus::Unknown => {},
}
let (last_best, last_best_number) = {
let info = self.backend.blockchain().info()?;
(info.best_hash, info.best_number)
};
// this is a fairly arbitrary choice of where to draw the line on making notifications,
// but the general goal is to only make notifications when we are already fully synced
// and get a new chain head.
let make_notifications = match origin {
BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => true,
BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false,
};
// ensure parent block is finalized to maintain invariant that
// finality is called sequentially.
if finalized {
self.apply_finality(parent_hash, last_best, make_notifications)?;
}
let tags = self.transaction_tags(parent_hash, &body)?;
let mut transaction = self.backend.begin_operation(BlockId::Hash(parent_hash))?;
let (storage_update, changes_update, storage_changes) = match transaction.state()? {
Some(transaction_state) => {
let mut overlay = Default::default();
let r = self.executor.call_at_state(
transaction_state,
&mut overlay,
"execute_block",
&<Block as BlockT>::new(import_headers.pre().clone(), body.clone().unwrap_or_default()).encode(),
match (origin, self.block_execution_strategy) {
(BlockOrigin::NetworkInitialSync, _) | (_, ExecutionStrategy::NativeWhenPossible) =>
ExecutionManager::NativeWhenPossible,
(_, ExecutionStrategy::AlwaysWasm) => ExecutionManager::AlwaysWasm,
_ => ExecutionManager::Both(|wasm_result, native_result| {
let header = import_headers.post();
warn!("Consensus error between wasm and native block execution at block {}", hash);
warn!(" Header {:?}", header);
warn!(" Native result {:?}", native_result);
warn!(" Wasm result {:?}", wasm_result);
telemetry!("block.execute.consensus_failure";
"hash" => ?hash,
"origin" => ?origin,
"header" => ?header
);
wasm_result
}),
},
);
let (_, storage_update, changes_update) = r?;
(Some(storage_update), Some(changes_update), Some(overlay.into_committed()))
},
None => (None, None, None)
};

// TODO: non longest-chain rule.
let is_new_best = finalized || import_headers.post().number() > &last_best_number;
let leaf_state = if finalized {
::backend::NewBlockState::Final
} else if is_new_best {
::backend::NewBlockState::Best
} else {
::backend::NewBlockState::Normal
};
trace!("Imported {}, (#{}), best={}, origin={:?}", hash, import_headers.post().number(), is_new_best, origin);
transaction.set_block_data(
import_headers.post().clone(),
body,
Some(justification),
leaf_state,
)?;
if let Some(authorities) = authorities {
transaction.update_authorities(authorities);
}
if let Some(storage_update) = storage_update {
transaction.update_storage(storage_update)?;
}
if let Some(Some(changes_update)) = changes_update {
transaction.update_changes_trie(changes_update)?;
}
if let Some(storage_changes) = storage_changes {
// TODO [ToDr] How to handle re-orgs? Should we re-emit all storage changes?
self.storage_notifications.lock()
.trigger(&hash, storage_changes);
}
if finalized {
let notification = FinalityNotification::<Block> {
hash,
header: import_headers.post().clone(),
};
self.finality_notification_sinks.lock()
.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
}
let notification = BlockImportNotification::<Block> {
hash,
origin,
header: import_headers.into_post(),
is_new_best,
tags,
};
self.import_notification_sinks.lock()
.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
/// Finalizes all blocks up to the given block.
fn apply_finality(&self, block: Block::Hash, best_block: Block::Hash, notify: bool) -> error::Result<()> {
// find tree route from last finalized to given block.
let last_finalized = self.backend.blockchain().last_finalized()?;
if block == last_finalized { return Ok(()) }
let route_from_finalized = ::blockchain::tree_route(
self.backend.blockchain(),
BlockId::Hash(last_finalized),
BlockId::Hash(block),
)?;
if let Some(retracted) = route_from_finalized.retracted().get(0) {
warn!("Safety violation: attempted to revert finalized block {:?} which is not in the \
same chain as last finalized {:?}", retracted, last_finalized);
bail!(error::ErrorKind::NotInFinalizedChain);
}
let route_from_best = ::blockchain::tree_route(
self.backend.blockchain(),
BlockId::Hash(best_block),
BlockId::Hash(block),
)?;
// if the block is not a direct ancestor of the current best chain,
// then some other block is the common ancestor.
if route_from_best.common_block().hash != block {
// TODO: reorganize best block to be the best chain containing
// `block`.
}
for finalize_new in route_from_finalized.enacted() {
self.backend.finalize_block(BlockId::Hash(finalize_new.hash))?;
}
if notify {
// sometimes when syncing, tons of blocks can be finalized at once.
// we'll send notifications spuriously in that case.
const MAX_TO_NOTIFY: usize = 256;
let enacted = route_from_finalized.enacted();
let start = enacted.len() - ::std::cmp::min(enacted.len(), MAX_TO_NOTIFY);
let mut sinks = self.finality_notification_sinks.lock();
for finalized in &enacted[start..] {
let header = self.header(&BlockId::Hash(finalized.hash))?
.expect("header already known to exist in DB because it is indicated in the tree route; qed");
let notification = FinalityNotification {
header,
hash: finalized.hash,
};
sinks.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
}
}
Ok(())
}
/// Finalize a block. This will implicitly finalize all blocks up to it and
/// fire finality notifications.
///
/// Pass a flag to indicate whether finality notifications should be propagated.
/// This is usually tied to some synchronization state, where we don't send notifications
/// while performing major synchronization work.
pub fn finalize_block(&self, id: BlockId<Block>, notify: bool) -> error::Result<()> {
let last_best = self.backend.blockchain().info()?.best_hash;
let to_finalize_hash = match id {
BlockId::Hash(h) => h,
BlockId::Number(n) => self.backend.blockchain().hash(n)?
.ok_or_else(|| error::ErrorKind::UnknownBlock(format!("No block with number {:?}", n)))?,
};
self.apply_finality(to_finalize_hash, last_best, notify)
}
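// Hedged usage sketch (not from the source): finalize the block at a given number and let
// finality notifications fire; all of its not-yet-final ancestors are finalized first.
//
//     client.finalize_block(BlockId::Number(number), true)?;
//     // passing `false` suppresses the notifications, e.g. during major sync where
//     // thousands of blocks may become final at once.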
/// Attempts to revert the chain by `n` blocks. Returns the number of blocks that were
/// successfully reverted.
pub fn revert(&self, n: NumberFor<Block>) -> error::Result<NumberFor<Block>> {
Ok(self.backend.revert(n)?)
}
/// Get blockchain info.
pub fn info(&self) -> error::Result<ClientInfo<Block>> {
let info = self.backend.blockchain().info().map_err(|e| error::Error::from_blockchain(Box::new(e)))?;
Ok(ClientInfo {
chain: info,
best_queued_hash: None,
best_queued_number: None,
})
}
/// Get block status.
pub fn block_status(&self, id: &BlockId<Block>) -> error::Result<BlockStatus> {
if let BlockId::Hash(ref h) = id {
if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) {
return Ok(BlockStatus::Queued);
}
}
match self.backend.blockchain().header(*id).map_err(|e| error::Error::from_blockchain(Box::new(e)))?.is_some() {
true => Ok(BlockStatus::InChain),
false => Ok(BlockStatus::Unknown),
}
}
/// Convert an arbitrary block ID into a block number, returning an error if the block is unknown.
fn require_block_number_from_id(&self, id: &BlockId<Block>) -> error::Result<NumberFor<Block>> {
self.backend.blockchain().block_number_from_id(id)
.and_then(|n| n.ok_or_else(|| error::ErrorKind::UnknownBlock(format!("{}", id)).into()))
}
/// Get block header by id.
pub fn header(&self, id: &BlockId<Block>) -> error::Result<Option<<Block as BlockT>::Header>> {
self.backend.blockchain().header(*id)
}
/// Get block body by id.
pub fn body(&self, id: &BlockId<Block>) -> error::Result<Option<Vec<<Block as BlockT>::Extrinsic>>> {
self.backend.blockchain().body(*id)
}
/// Get block justification set by id.
pub fn justification(&self, id: &BlockId<Block>) -> error::Result<Option<Justification>> {
self.backend.blockchain().justification(*id)
}
/// Get full block by id.
pub fn block(&self, id: &BlockId<Block>) -> error::Result<Option<SignedBlock<Block>>> {
Ok(match (self.header(id)?, self.body(id)?, self.justification(id)?) {
(Some(header), Some(extrinsics), Some(justification)) =>
Some(SignedBlock { block: Block::new(header, extrinsics), justification }),
_ => None,
})
}

/// Get best block header.
pub fn best_block_header(&self) -> error::Result<<Block as BlockT>::Header> {
let info = self.backend.blockchain().info().map_err(|e| error::Error::from_blockchain(Box::new(e)))?;
Ok(self.header(&BlockId::Hash(info.best_hash))?.expect("Best block header must always exist"))
}
/// Get the most recent block hash of the best (longest) chain
/// that contains a block with the given `target_hash`.
///
/// If `maybe_max_number` is `Some(max_block_number)`, the search is limited to blocks with
/// `number <= max_block_number` - in other words, as if there were no blocks greater than `max_block_number`.
///
/// Returns `Ok(None)` if `target_hash` is not found in the search space.
///
/// TODO [snd] possibly implement this on blockchain::Backend and just redirect here
/// TODO [snd] write down time complexity
pub fn best_containing(&self, target_hash: Block::Hash, maybe_max_number: Option<NumberFor<Block>>)
-> error::Result<Option<Block::Hash>>
{
let target_header = {
match self.backend.blockchain().header(BlockId::Hash(target_hash))? {
Some(x) => x,
// target not in blockchain
None => { return Ok(None); },
}
};
if let Some(max_number) = maybe_max_number {
// target outside search range
if target_header.number() > &max_number {
return Ok(None);
}
}
let (leaves, best_already_checked) = {
// ensure no blocks are imported during this code block.
// an import could trigger a reorg which could change the canonical chain.
// we depend on the canonical chain staying the same during this code block.
let _import_lock = self.import_lock.lock();
let info = self.backend.blockchain().info()?;
let canon_hash = self.backend.blockchain().hash(*target_header.number())?
.ok_or_else(|| error::Error::from(format!("failed to get hash for block number {}", target_header.number())))?;
if canon_hash == target_hash {
if let Some(max_number) = maybe_max_number {
// something has to guarantee that max_number is in chain
return Ok(Some(self.backend.blockchain().hash(max_number)?.ok_or_else(|| error::Error::from(format!("failed to get hash for block number {}", max_number)))?));
} else {
return Ok(Some(info.best_hash));
}
}
(self.backend.blockchain().leaves()?, info.best_hash)
};
// for each chain. longest chain first. shortest last
for leaf_hash in leaves {
// ignore canonical chain which we already checked above
if leaf_hash == best_already_checked {
continue;
}
// start at the leaf
let mut current_hash = leaf_hash;
// if search is not restricted then the leaf is the best
let mut best_hash = leaf_hash;
// go backwards entering the search space
// waiting until we are <= max_number
if let Some(max_number) = maybe_max_number {
loop {
// TODO [snd] this should be a panic
let current_header = self.backend.blockchain().header(BlockId::Hash(current_hash.clone()))?
.ok_or_else(|| error::Error::from(format!("failed to get header for hash {}", current_hash)))?;
if current_header.number() <= &max_number {
best_hash = current_header.hash();
break;
}
current_hash = *current_header.parent_hash();
}
}
// go backwards through the chain (via parent links)
loop {
// until we find target
if current_hash == target_hash {
return Ok(Some(best_hash));
}
// TODO [snd] this should be a panic
let current_header = self.backend.blockchain().header(BlockId::Hash(current_hash.clone()))?
.ok_or_else(|| error::Error::from(format!("failed to get header for hash {}", current_hash)))?;
// stop search in this chain once we go below the target's block number
if current_header.number() < target_header.number() {
break;
}
current_hash = *current_header.parent_hash();
}
}
unreachable!("this is a bug. `target_hash` is in blockchain but wasn't found following all leaves backwards");
}
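// Hedged usage sketch (not from the source): pick the head of the best chain that still
// contains `target_hash`, optionally capped at a maximum block number - the kind of query a
// finality gadget makes when deciding what to vote on.
//
//     let best_unbounded = client.best_containing(target_hash, None)?;
//     let best_capped = client.best_containing(target_hash, Some(max_number))?;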
fn changes_trie_config(&self) -> Result<Option<ChangesTrieConfiguration>, Error> {
Ok(self.backend.state_at(BlockId::Number(self.backend.blockchain().info()?.best_number))?
.storage(well_known_keys::CHANGES_TRIE_CONFIG)
.map_err(|e| error::Error::from_state(Box::new(e)))?
.and_then(|c| Decode::decode(&mut &*c)))
}
}

impl<B, E, Block, RA> ChainHeaderBackend<Block> for Client<B, E, Block, RA> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Send + Sync,
Block: BlockT<Hash=H256>,
RA: Send + Sync
{
fn header(&self, id: BlockId<Block>) -> error::Result<Option<Block::Header>> {
self.backend.blockchain().header(id)
}
fn info(&self) -> error::Result<blockchain::Info<Block>> {
self.backend.blockchain().info()
}
fn status(&self, id: BlockId<Block>) -> error::Result<blockchain::BlockStatus> {
self.backend.blockchain().status(id)
}
fn number(&self, hash: Block::Hash) -> error::Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>> {