// Copyright 2017-2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.

// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Substrate.  If not, see <http://www.gnu.org/licenses/>.

//! Substrate Client

use std::{
	marker::PhantomData, collections::{HashSet, BTreeMap, HashMap}, sync::Arc,
	panic::UnwindSafe, result, cell::RefCell,
};
use log::{info, trace, warn};
use parking_lot::{Mutex, RwLock};
use codec::{Encode, Decode};
use hash_db::{Hasher, Prefix};
use primitives::{
	Blake2Hasher, H256, ChangesTrieConfiguration, convert_hash,
	NeverNativeValue, ExecutionContext, NativeOrEncoded,
	storage::{StorageKey, StorageData, well_known_keys, ChildInfo},
	traits::CodeExecutor,
};
use sc_telemetry::{telemetry, SUBSTRATE_INFO};
use sp_runtime::{
	Justification, BuildStorage,
	generic::{BlockId, SignedBlock, DigestItem},
	traits::{
		Block as BlockT, Header as HeaderT, Zero, NumberFor,
		ApiRef, ProvideRuntimeApi, SaturatedConversion, One, DigestFor,
	},
};
use state_machine::{
	DBValue, Backend as StateBackend, ChangesTrieAnchorBlockId, ExecutionStrategy, ExecutionManager,
	prove_read, prove_child_read, ChangesTrieRootsStorage, ChangesTrieStorage,
	ChangesTrieTransaction, ChangesTrieConfigurationRange, key_changes, key_changes_proof,
	OverlayedChanges, BackendTrustLevel, StorageProof, merge_storage_proofs,
};
use executor::{RuntimeVersion, RuntimeInfo};
use consensus::{
	Error as ConsensusError, BlockStatus, BlockImportParams, BlockCheckParams,
	ImportResult, BlockOrigin, ForkChoiceStrategy,
	SelectChain, self,
};
use sp_blockchain::{self as blockchain,
	Backend as ChainBackend,
	HeaderBackend as ChainHeaderBackend, ProvideCache, Cache,
	well_known_cache_keys::Id as CacheKeyId,
	HeaderMetadata, CachedHeaderMetadata, Error,
};
use sp_api::{CallRuntimeAt, ConstructRuntimeApi, Core as CoreApi, ProofRecorder, InitializeBlock};
use block_builder::BlockBuilderApi;

pub use client_api::{
	backend::{
		self, BlockImportOperation, PrunableStateChangesTrieStorage,
		ClientImportOperation, Finalizer, ImportSummary, NewBlockState,
	},
	client::{
		ImportNotifications, FinalityNotification, FinalityNotifications, BlockImportNotification,
		ClientInfo, BlockchainEvents, BlockBody, ProvideUncles, ForkBlocks,
	},
	execution_extensions::{ExecutionExtensions, ExecutionStrategies},
	notifications::{StorageNotifications, StorageEventStream},
	CallExecutor,
};
use crate::{
	call_executor::LocalCallExecutor,
	light::{call_executor::prove_execution, fetcher::ChangesProof},
	in_mem, genesis, cht,
};
type StorageUpdate<B, Block> = <
	<
		<B as backend::Backend<Block, Blake2Hasher>>::BlockImportOperation
			as BlockImportOperation<Block, Blake2Hasher>
	>::State as state_machine::Backend<Blake2Hasher>>::Transaction;
type ChangesUpdate<Block> = ChangesTrieTransaction<Blake2Hasher, NumberFor<Block>>;
/// Substrate Client
pub struct Client<B, E, Block, RA> where Block: BlockT {
	backend: Arc<B>,
	executor: E,
	storage_notifications: Mutex<StorageNotifications<Block>>,
	import_notification_sinks: Mutex<Vec<mpsc::UnboundedSender<BlockImportNotification<Block>>>>,
	finality_notification_sinks: Mutex<Vec<mpsc::UnboundedSender<FinalityNotification<Block>>>>,
	// holds the block hash currently being imported. TODO: replace this with block queue
	importing_block: RwLock<Option<Block::Hash>>,
	fork_blocks: ForkBlocks<Block>,
	execution_extensions: ExecutionExtensions<Block>,
	_phantom: PhantomData<RA>,
}

// used in importing a block, where additional changes are made after the runtime
// executed.
enum PrePostHeader<H> {
	// they are the same: no post-runtime digest items.
	Same(H),
	// different headers (pre, post).
	Different(H, H),
}

impl<H> PrePostHeader<H> {
	// get a reference to the "pre-header" -- the header as it should be just after the runtime.
	fn pre(&self) -> &H {
		match *self {
			PrePostHeader::Same(ref h) => h,
			PrePostHeader::Different(ref h, _) => h,
		}
	}

	// get a reference to the "post-header" -- the header as it should be after all changes are applied.
	fn post(&self) -> &H {
		match *self {
			PrePostHeader::Same(ref h) => h,
			PrePostHeader::Different(_, ref h) => h,
		}
	}

	// convert to the "post-header" -- the header as it should be after all changes are applied.
	fn into_post(self) -> H {
		match self {
			PrePostHeader::Same(h) => h,
			PrePostHeader::Different(_, h) => h,
		}
	}
}

/// Create an instance of in-memory client.
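///
/// Illustrative sketch (added here, not from the original docs): `MyExecutor`, `genesis` and
/// `RuntimeApi` are assumed placeholders for a `CodeExecutor + RuntimeInfo` implementation,
/// a `BuildStorage` genesis source and the runtime API type.
///
/// ```ignore
/// let client = new_in_mem::<MyExecutor, Block, _, RuntimeApi>(
/// 	MyExecutor::default(),
/// 	genesis,
/// 	None, // no keystore
/// )?;
/// assert_eq!(client.info().chain.best_number, Zero::zero());
/// ```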
pub fn new_in_mem<E, Block, S, RA>(
	executor: E,
	genesis_storage: S,
	keystore: Option<primitives::traits::BareCryptoStorePtr>,
) -> sp_blockchain::Result<Client<
	in_mem::Backend<Block, Blake2Hasher>,
	LocalCallExecutor<in_mem::Backend<Block, Blake2Hasher>, E>,
	Block,
	RA
>> where
	E: CodeExecutor + RuntimeInfo,
	S: BuildStorage,
	Block: BlockT<Hash=H256>,
{
	new_with_backend(Arc::new(in_mem::Backend::new()), executor, genesis_storage, keystore)
}

/// Create a client with the explicitly provided backend.
/// This is useful for testing backend implementations.
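///
/// Illustrative sketch (not from the original docs), wiring the in-memory backend from this
/// crate to a `LocalCallExecutor`; `MyExecutor`, `genesis` and `RuntimeApi` are assumed
/// placeholders.
///
/// ```ignore
/// let backend = Arc::new(in_mem::Backend::new());
/// let client = new_with_backend::<_, _, Block, _, RuntimeApi>(
/// 	backend,
/// 	MyExecutor::default(),
/// 	genesis,
/// 	None, // no keystore
/// )?;
/// ```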
pub fn new_with_backend<B, E, Block, S, RA>(
	backend: Arc<B>,
	executor: E,
	build_genesis_storage: S,
	keystore: Option<primitives::traits::BareCryptoStorePtr>,
) -> sp_blockchain::Result<Client<B, LocalCallExecutor<B, E>, Block, RA>>
	where
		E: CodeExecutor + RuntimeInfo,
		S: BuildStorage,
		Block: BlockT<Hash=H256>,
		B: backend::LocalBackend<Block, Blake2Hasher>
{
	let call_executor = LocalCallExecutor::new(backend.clone(), executor);
	let extensions = ExecutionExtensions::new(Default::default(), keystore);
	Client::new(backend, call_executor, build_genesis_storage, Default::default(), extensions)
}

impl<B, E, Block, RA> BlockOf for Client<B, E, Block, RA> where
	B: backend::Backend<Block, Blake2Hasher>,
	E: CallExecutor<Block, Blake2Hasher>,
	Block: BlockT<Hash=H256>,
{
	type Type = Block;
}

impl<B, E, Block, RA> Client<B, E, Block, RA> where
	B: backend::Backend<Block, Blake2Hasher>,
	E: CallExecutor<Block, Blake2Hasher>,
	Block: BlockT<Hash=H256>,
{
	/// Creates new Substrate Client with given blockchain and code executor.
	pub fn new<S: BuildStorage>(
		backend: Arc<B>,
		executor: E,
		build_genesis_storage: S,
		fork_blocks: ForkBlocks<Block>,
		execution_extensions: ExecutionExtensions<Block>,
	) -> sp_blockchain::Result<Self> {
		if backend.blockchain().header(BlockId::Number(Zero::zero()))?.is_none() {
			let genesis_storage = build_genesis_storage.build_storage()?;
			let mut op = backend.begin_operation()?;
			backend.begin_state_operation(&mut op, BlockId::Hash(Default::default()))?;
			let state_root = op.reset_storage(genesis_storage)?;
			let genesis_block = genesis::construct_genesis_block::<Block>(state_root.into());
			info!("Initializing Genesis block/state (state: {}, header-hash: {})",
				genesis_block.header().state_root(),
				genesis_block.header().hash()
			);
			op.set_block_data(
				genesis_block.deconstruct().0,
				Some(vec![]),
				None,
				NewBlockState::Final,
			)?;
			backend.commit_operation(op)?;
		}
		Ok(Client {
			backend,
			executor,
			storage_notifications: Default::default(),
			import_notification_sinks: Default::default(),
			finality_notification_sinks: Default::default(),
			importing_block: Default::default(),
			fork_blocks,
			execution_extensions,
			_phantom: Default::default(),
		})
	}

	/// Get a reference to the execution extensions.
	pub fn execution_extensions(&self) -> &ExecutionExtensions<Block> {
		&self.execution_extensions
	}
	/// Get a reference to the state at a given block.
	pub fn state_at(&self, block: &BlockId<Block>) -> sp_blockchain::Result<B::State> {
		self.backend.state_at(*block)
	}

	/// Given a `BlockId` and a key prefix, return the matching storage keys in that block.
	pub fn storage_keys(&self, id: &BlockId<Block>, key_prefix: &StorageKey) -> sp_blockchain::Result<Vec<StorageKey>> {
		let keys = self.state_at(id)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect();
		Ok(keys)
	}

	/// Given a `BlockId` and a key, return the value under the key in that block.
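	///
	/// Illustrative sketch (not from the original docs): read the runtime code blob of the
	/// best block through the well-known `:code` key.
	///
	/// ```ignore
	/// let best = BlockId::Hash(client.info().chain.best_hash);
	/// let code = client.storage(&best, &StorageKey(well_known_keys::CODE.to_vec()))?;
	/// assert!(code.is_some());
	/// ```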
	pub fn storage(&self, id: &BlockId<Block>, key: &StorageKey) -> sp_blockchain::Result<Option<StorageData>> {
		Ok(self.state_at(id)?
			.storage(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
			.map(StorageData)
		)
	}
	/// Given a `BlockId` and a key, return the hash of the value under the key in that block.
	pub fn storage_hash(&self, id: &BlockId<Block>, key: &StorageKey)
		-> sp_blockchain::Result<Option<Block::Hash>>
	{
		Ok(self.state_at(id)?
			.storage_hash(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
		)
	}
	/// Given a `BlockId`, a key prefix, and a child storage key, return the matching child storage keys.
	pub fn child_storage_keys(
		&self,
		id: &BlockId<Block>,
		child_storage_key: &StorageKey,
		child_info: ChildInfo,
		key_prefix: &StorageKey
	) -> sp_blockchain::Result<Vec<StorageKey>> {
		let keys = self.state_at(id)?
			.child_keys(&child_storage_key.0, child_info, &key_prefix.0)
			.into_iter()
			.map(StorageKey)
			.collect();
		Ok(keys)
	}

	/// Given a `BlockId`, a key and a child storage key, return the value under the key in that block.
	pub fn child_storage(
		&self,
		id: &BlockId<Block>,
		storage_key: &StorageKey,
		child_info: ChildInfo,
		key: &StorageKey
	) -> sp_blockchain::Result<Option<StorageData>> {
		Ok(self.state_at(id)?
			.child_storage(&storage_key.0, child_info, &key.0)
			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
			.map(StorageData))
	}

	/// Given a `BlockId`, a key and a child storage key, return the hash under the key in that block.
	pub fn child_storage_hash(
		&self,
		id: &BlockId<Block>,
		storage_key: &StorageKey,
		child_info: ChildInfo,
		key: &StorageKey
	) -> sp_blockchain::Result<Option<Block::Hash>> {
		Ok(self.state_at(id)?
			.child_storage_hash(&storage_key.0, child_info, &key.0)
			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
		)
	}
	/// Get the code at a given block.
	pub fn code_at(&self, id: &BlockId<Block>) -> sp_blockchain::Result<Vec<u8>> {
		Ok(self.storage(id, &StorageKey(well_known_keys::CODE.to_vec()))?
			.expect("None is returned if there's no value stored for the given key;\
				':code' key is always defined; qed").0)
	}

	/// Get the RuntimeVersion at a given block.
	pub fn runtime_version_at(&self, id: &BlockId<Block>) -> sp_blockchain::Result<RuntimeVersion> {
		self.executor.runtime_version(id)
	}

	/// Get call executor reference.
	pub fn executor(&self) -> &E {
		&self.executor
	}

	/// Reads storage value at a given block + key, returning read proof.
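	///
	/// Illustrative sketch (not from the original docs): prove the value of the well-known
	/// `:code` key at the best block, so a light client can verify it against the state root.
	///
	/// ```ignore
	/// let best = BlockId::Hash(client.info().chain.best_hash);
	/// let proof = client.read_proof(&best, ::std::iter::once(well_known_keys::CODE))?;
	/// ```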
	pub fn read_proof<I>(&self, id: &BlockId<Block>, keys: I) -> sp_blockchain::Result<StorageProof> where
		I: IntoIterator,
		I::Item: AsRef<[u8]>,
	{
		self.state_at(id)
			.and_then(|state| prove_read(state, keys)
				.map_err(Into::into))
	}

	/// Reads child storage value at a given block + storage_key + key, returning
	/// read proof.
	pub fn read_child_proof<I>(
		&self,
		id: &BlockId<Block>,
		storage_key: &[u8],
		child_info: ChildInfo,
		keys: I,
	) -> sp_blockchain::Result<StorageProof> where
		I: IntoIterator,
		I::Item: AsRef<[u8]>,
	{
		self.state_at(id)
			.and_then(|state| prove_child_read(state, storage_key, child_info, keys)
				.map_err(Into::into))
	}

	/// Execute a call to a contract on top of state in a block of given hash
	/// and return the execution proof.
	///
	/// No changes are made.
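	///
	/// Illustrative sketch (not from the original docs): record a proof for the standard
	/// `Core_version` runtime call at the best block; the method name is only an example.
	///
	/// ```ignore
	/// let best = BlockId::Hash(client.info().chain.best_hash);
	/// let (encoded_version, proof) = client.execution_proof(&best, "Core_version", &[])?;
	/// ```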
	pub fn execution_proof(&self,
		id: &BlockId<Block>,
		method: &str,
		call_data: &[u8]
	) -> sp_blockchain::Result<(Vec<u8>, StorageProof)> {
		let state = self.state_at(id)?;
		let header = self.prepare_environment_block(id)?;
		prove_execution(state, header, &self.executor, method, call_data)
	}

	/// Reads given header and generates CHT-based header proof.
	pub fn header_proof(&self, id: &BlockId<Block>) -> sp_blockchain::Result<(Block::Header, StorageProof)> {
		self.header_proof_with_cht_size(id, cht::size())
	}

	/// Get block hash by number.
	pub fn block_hash(&self,
		block_number: <<Block as BlockT>::Header as HeaderT>::Number
	) -> sp_blockchain::Result<Option<Block::Hash>> {
		self.backend.blockchain().hash(block_number)
	}

	/// Reads given header and generates CHT-based header proof for CHT of given size.
	pub fn header_proof_with_cht_size(
		&self,
		id: &BlockId<Block>,
		cht_size: NumberFor<Block>,
	) -> sp_blockchain::Result<(Block::Header, StorageProof)> {
		let proof_error = || sp_blockchain::Error::Backend(format!("Failed to generate header proof for {:?}", id));
		let header = self.backend.blockchain().expect_header(*id)?;
		let block_num = *header.number();
		let cht_num = cht::block_to_cht_number(cht_size, block_num).ok_or_else(proof_error)?;
		let cht_start = cht::start_number(cht_size, cht_num);
		let mut current_num = cht_start;
		let cht_range = ::std::iter::from_fn(|| {
			let old_current_num = current_num;
			current_num = current_num + One::one();
			Some(old_current_num)
		});
		let headers = cht_range.map(|num| self.block_hash(num));
		let proof = cht::build_proof::<Block::Header, Blake2Hasher, _, _>(cht_size, cht_num, ::std::iter::once(block_num), headers)?;
		Ok((header, proof))
	}

	/// Get longest range within [first; last] that is possible to use in `key_changes`
	/// and `key_changes_proof` calls.
	/// Range could be shortened from the beginning if some changes tries have been pruned.
	/// Returns Ok(None) if changes tries are not supported.
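	///
	/// Illustrative sketch (not from the original docs): clamp a requested range before calling
	/// `key_changes`, so blocks whose changes tries were already pruned are skipped.
	///
	/// ```ignore
	/// let best = BlockId::Hash(client.info().chain.best_hash);
	/// if let Some((first, last)) = client.max_key_changes_range(One::one(), best)? {
	/// 	// `first` may be later than the requested block if older tries were pruned.
	/// }
	/// ```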
	pub fn max_key_changes_range(
		&self,
		first: NumberFor<Block>,
		last: BlockId<Block>,
	) -> sp_blockchain::Result<Option<(NumberFor<Block>, BlockId<Block>)>> {
		let (config, storage) = match self.require_changes_trie().ok() {
			Some((config, storage)) => (config, storage),
			None => return Ok(None),
		};
		let last_num = self.backend.blockchain().expect_block_number_from_id(&last)?;
		if first > last_num {
			return Err(sp_blockchain::Error::ChangesTrieAccessFailed("Invalid changes trie range".into()));
		}
		let finalized_number = self.backend.blockchain().info().finalized_number;
		let oldest = storage.oldest_changes_trie_block(&config, finalized_number);
		let first = ::std::cmp::max(first, oldest);
		Ok(Some((first, last)))
	}

	/// Get pairs of (block, extrinsic) where key has been changed at given blocks range.
	/// Works only for runtimes that support changes tries.
	///
	/// Changes are returned in descending order (i.e. last block comes first).
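	///
	/// Illustrative sketch (not from the original docs): list every (block, extrinsic index)
	/// pair that changed a top-level storage key; `key` is an assumed `StorageKey`.
	///
	/// ```ignore
	/// let best = BlockId::Hash(client.info().chain.best_hash);
	/// for (block, extrinsic_index) in client.key_changes(One::one(), best, None, &key)? {
	/// 	// most recently changed block comes first
	/// }
	/// ```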
	pub fn key_changes(
		&self,
		first: NumberFor<Block>,
		last: BlockId<Block>,
		storage_key: Option<&StorageKey>,
		key: &StorageKey,
	) -> sp_blockchain::Result<Vec<(NumberFor<Block>, u32)>> {
		let (config, storage) = self.require_changes_trie()?;
		let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?;
		let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?;
		// FIXME: remove this in https://github.com/paritytech/substrate/pull/3201
		let config_range = ChangesTrieConfigurationRange {
			config: &config,
			zero: Zero::zero(),
			end: None,
		};

		key_changes::<Blake2Hasher, _>(
			config_range,
			&*storage,
			first,
			&ChangesTrieAnchorBlockId {
				hash: convert_hash(&last_hash),
				number: last_number,
			},
			self.backend.blockchain().info().best_number,
			storage_key.as_ref().map(|sk| sk.0.as_slice()),
			&key.0,
		)
		.and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::<Result<_, _>>())
		.map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))
	}

	/// Get proof for computation of (block, extrinsic) pairs where key has been changed at given blocks range.
	/// `min` is the hash of the first block whose changes trie root is known to the requester - when we're using
	/// changes tries from ancestors of this block, we should also provide proofs for their changes tries roots.
	/// `max` is the hash of the last block known to the requester - we can't use changes tries from descendants
	/// of this block.
	/// Works only for runtimes that support changes tries.
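	///
	/// Illustrative sketch (not from the original docs): answer a light-client changes request;
	/// the `request` fields are assumed placeholders for the hashes and key described above.
	///
	/// ```ignore
	/// let proof = client.key_changes_proof(
	/// 	request.first,
	/// 	request.last,
	/// 	request.min,
	/// 	request.max,
	/// 	None,
	/// 	&request.key,
	/// )?;
	/// ```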
	pub fn key_changes_proof(
		&self,
		first: Block::Hash,
		last: Block::Hash,
		min: Block::Hash,
		max: Block::Hash,
		storage_key: Option<&StorageKey>,
		key: &StorageKey,
	) -> sp_blockchain::Result<ChangesProof<Block::Header>> {
		self.key_changes_proof_with_cht_size(
			first,
			last,
			min,
			max,
			storage_key,
			key,
			cht::size(),
		)
	}

	/// Does the same work as `key_changes_proof`, but assumes that CHTs are of passed size.
	pub fn key_changes_proof_with_cht_size(
		&self,
		first: Block::Hash,
		last: Block::Hash,
		min: Block::Hash,
		max: Block::Hash,
		storage_key: Option<&StorageKey>,
		key: &StorageKey,
		cht_size: NumberFor<Block>,
	) -> sp_blockchain::Result<ChangesProof<Block::Header>> {
		struct AccessedRootsRecorder<'a, Block: BlockT> {
			storage: &'a dyn ChangesTrieStorage<Blake2Hasher, NumberFor<Block>>,
			min: NumberFor<Block>,
			required_roots_proofs: Mutex<BTreeMap<NumberFor<Block>, H256>>,
		};

		impl<'a, Block: BlockT> ChangesTrieRootsStorage<Blake2Hasher, NumberFor<Block>> for AccessedRootsRecorder<'a, Block> {
			fn build_anchor(&self, hash: H256) -> Result<ChangesTrieAnchorBlockId<H256, NumberFor<Block>>, String> {
				self.storage.build_anchor(hash)
			}

			fn root(
				&self,
				anchor: &ChangesTrieAnchorBlockId<H256, NumberFor<Block>>,
				block: NumberFor<Block>,
			) -> Result<Option<H256>, String> {
				let root = self.storage.root(anchor, block)?;
				if block < self.min {
					if let Some(ref root) = root {
						self.required_roots_proofs.lock().insert(
							block,
							root.clone(),
						);
					}
				}

				Ok(root)
			}
		}

		impl<'a, Block: BlockT> ChangesTrieStorage<Blake2Hasher, NumberFor<Block>> for AccessedRootsRecorder<'a, Block> {
			fn as_roots_storage(&self) -> &dyn state_machine::ChangesTrieRootsStorage<Blake2Hasher, NumberFor<Block>> {
				self
			}

			fn with_cached_changed_keys(
				&self,
				root: &H256,
				functor: &mut dyn FnMut(&HashMap<Option<Vec<u8>>, HashSet<Vec<u8>>>),
			) -> bool {
				self.storage.with_cached_changed_keys(root, functor)
			}

			fn get(&self, key: &H256, prefix: Prefix) -> Result<Option<DBValue>, String> {
				self.storage.get(key, prefix)
			}
		}

		let (config, storage) = self.require_changes_trie()?;
		let min_number = self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(min))?;
		let recording_storage = AccessedRootsRecorder::<Block> {
			storage,
			min: min_number,
			required_roots_proofs: Mutex::new(BTreeMap::new()),
		};

		let max_number = ::std::cmp::min(
			self.backend.blockchain().info().best_number,
			self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(max))?,
		);

		// FIXME: remove this in https://github.com/paritytech/substrate/pull/3201
		let config_range = ChangesTrieConfigurationRange {
			config: &config,
			zero: Zero::zero(),
			end: None,
		};

		let first_number = self.backend.blockchain()
			.expect_block_number_from_id(&BlockId::Hash(first))?;
		let last_number = self.backend.blockchain()
			.expect_block_number_from_id(&BlockId::Hash(last))?;
		let key_changes_proof = key_changes_proof::<Blake2Hasher, _>(
			config_range,
			&recording_storage,
			first_number,
			&ChangesTrieAnchorBlockId {
				hash: convert_hash(&last),
				number: last_number,
			},
			max_number,
			storage_key.as_ref().map(|sk| sk.0.as_slice()),
			&key.0,
		)
		.map_err(|err| sp_blockchain::Error::from(sp_blockchain::Error::ChangesTrieAccessFailed(err)))?;

		// now gather proofs for all changes tries roots that were touched during key_changes_proof
		// execution AND are unknown (i.e. replaced with CHT) to the requester
		let roots = recording_storage.required_roots_proofs.into_inner();
		let roots_proof = self.changes_trie_roots_proof(cht_size, roots.keys().cloned())?;

		Ok(ChangesProof {
			max_block: max_number,
			proof: key_changes_proof,
			roots: roots.into_iter().map(|(n, h)| (n, convert_hash(&h))).collect(),
			roots_proof,
		})
	}

	/// Generate CHT-based proof for roots of changes tries at given blocks.
	fn changes_trie_roots_proof<I: IntoIterator<Item=NumberFor<Block>>>(
		&self,
		cht_size: NumberFor<Block>,
		blocks: I
	) -> sp_blockchain::Result<StorageProof> {
		// most probably we have touched several changes tries that are parts of the single CHT
		// => GroupBy changes tries by CHT number and then gather proof for the whole group at once
		let mut proofs = Vec::new();

		cht::for_each_cht_group::<Block::Header, _, _, _>(cht_size, blocks, |_, cht_num, cht_blocks| {
			let cht_proof = self.changes_trie_roots_proof_at_cht(cht_size, cht_num, cht_blocks)?;
			proofs.push(cht_proof);
			Ok(())
		})?;

		Ok(merge_storage_proofs(proofs))
	}

	/// Generates CHT-based proof for roots of changes tries at given blocks (that are part of single CHT).
	fn changes_trie_roots_proof_at_cht(
		&self,
		cht_size: NumberFor<Block>,
		cht_num: NumberFor<Block>,
		blocks: Vec<NumberFor<Block>>
	) -> sp_blockchain::Result<StorageProof> {
		let cht_start = cht::start_number(cht_size, cht_num);
		let mut current_num = cht_start;
		let cht_range = ::std::iter::from_fn(|| {
			let old_current_num = current_num;
			current_num = current_num + One::one();
			Some(old_current_num)
		});
		let roots = cht_range
			.map(|num| self.header(&BlockId::Number(num))
			.map(|block| block.and_then(|block| block.digest().log(DigestItem::as_changes_trie_root).cloned())));
		let proof = cht::build_proof::<Block::Header, Blake2Hasher, _, _>(cht_size, cht_num, blocks, roots)?;
		Ok(proof)
	}

	/// Returns changes trie configuration and storage or an error if it is not supported.
	fn require_changes_trie(&self) -> sp_blockchain::Result<(ChangesTrieConfiguration, &B::ChangesTrieStorage)> {
		let config = self.changes_trie_config()?;
		let storage = self.backend.changes_trie_storage();
		match (config, storage) {
			(Some(config), Some(storage)) => Ok((config, storage)),
			_ => Err(sp_blockchain::Error::ChangesTriesNotSupported.into()),
		}
	}

	/// Create a new block, built on the head of the chain.
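	///
	/// Illustrative sketch (not from the original docs): build a block on the current best
	/// block; `extrinsic` is an assumed, already-constructed extrinsic for this runtime.
	///
	/// ```ignore
	/// let mut builder = client.new_block(Default::default())?;
	/// builder.push(extrinsic)?;
	/// ```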
	pub fn new_block(
		&self,
		inherent_digests: DigestFor<Block>,
	) -> sp_blockchain::Result<block_builder::BlockBuilder<Block, Self>> where
		RA: Send + Sync,
		Self: ProvideRuntimeApi,
		<Self as ProvideRuntimeApi>::Api: BlockBuilderApi<Block, Error = Error>
	{
		let info = self.info();
		block_builder::BlockBuilder::new(
			self,
			info.chain.best_hash,
			info.chain.best_number,
			false,
			inherent_digests,
		)
	}

	/// Create a new block, built on top of `parent`.
	pub fn new_block_at(
		&self,
		parent: &BlockId<Block>,
		inherent_digests: DigestFor<Block>,
	) -> sp_blockchain::Result<block_builder::BlockBuilder<Block, Self>> where
		RA: Send + Sync,
		Self: ProvideRuntimeApi,
		<Self as ProvideRuntimeApi>::Api: BlockBuilderApi<Block, Error = Error>
	{
		block_builder::BlockBuilder::new(
			self,
			self.expect_block_hash_from_id(parent)?,
			self.expect_block_number_from_id(parent)?,
			false,
			inherent_digests,
		)
	}

	/// Create a new block, built on top of `parent` with proof recording enabled.
	///
	/// While proof recording is enabled, all accessed trie nodes are saved.
	/// These recorded trie nodes can be used by a third party to prove the
	/// output of this block builder without having access to the full storage.
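	///
	/// Illustrative sketch (not from the original docs): build on a known parent with proof
	/// recording switched on; the recorded trie nodes can later be handed to a verifier.
	///
	/// ```ignore
	/// let parent = BlockId::Hash(client.info().chain.best_hash);
	/// let builder = client.new_block_at_with_proof_recording(&parent, Default::default())?;
	/// ```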
	pub fn new_block_at_with_proof_recording(
		&self,
		parent: &BlockId<Block>,
		inherent_digests: DigestFor<Block>,
	) -> sp_blockchain::Result<block_builder::BlockBuilder<Block, Self>> where
		E: Clone + Send + Sync,
		RA: Send + Sync,
		Self: ProvideRuntimeApi,
		<Self as ProvideRuntimeApi>::Api: BlockBuilderApi<Block, Error = Error>
	{
		block_builder::BlockBuilder::new(
			self,
			self.expect_block_hash_from_id(parent)?,
			self.expect_block_number_from_id(parent)?,
			true,
			inherent_digests,
		)
	}

	/// Lock the import lock, and run operations inside.
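	///
	/// Illustrative sketch (not from the original docs): the block-import path further down in
	/// this file uses this to apply a checked block while holding the import lock.
	///
	/// ```ignore
	/// let import_result = client.lock_import_and_run(|operation| {
	/// 	client.apply_block(operation, import_block, new_cache)
	/// });
	/// ```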
	pub fn lock_import_and_run<R, Err, F>(&self, f: F) -> Result<R, Err> where
		F: FnOnce(&mut ClientImportOperation<Block, Blake2Hasher, B>) -> Result<R, Err>,
		Err: From<sp_blockchain::Error>,
	{
		let inner = || {
			let _import_lock = self.backend.get_import_lock().write();

			let mut op = ClientImportOperation {
				op: self.backend.begin_operation()?,
				notify_imported: None,
				notify_finalized: Vec::new(),
			};

			let r = f(&mut op)?;

			let ClientImportOperation { op, notify_imported, notify_finalized } = op;
			self.backend.commit_operation(op)?;
			self.notify_finalized(notify_finalized)?;

			if let Some(notify_imported) = notify_imported {
				self.notify_imported(notify_imported)?;
			}

			Ok(r)
		};

		let result = inner();
		*self.importing_block.write() = None;

		result
	}

	/// Apply a checked and validated block to an operation. If a justification is provided
	/// then `finalized` *must* be true.
	fn apply_block(
		&self,
		operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
		import_block: BlockImportParams<Block>,
		new_cache: HashMap<CacheKeyId, Vec<u8>>,
	) -> sp_blockchain::Result<ImportResult> where
		E: CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone,
	{
		let BlockImportParams {
			origin,
			header,
			justification,
			post_digests,
			body,
			finalized,
			auxiliary,
			fork_choice,
			allow_missing_state,
			import_existing,
		} = import_block;

		assert!(justification.is_some() && finalized || justification.is_none());

		let parent_hash = header.parent_hash().clone();
		let mut enact_state = true;
		match self.block_status(&BlockId::Hash(parent_hash))? {
			BlockStatus::Unknown => return Ok(ImportResult::UnknownParent),
			BlockStatus::InChainWithState | BlockStatus::Queued => {},
			BlockStatus::InChainPruned if allow_missing_state => {
				enact_state = false;
			},
			BlockStatus::InChainPruned => return Ok(ImportResult::MissingState),
			BlockStatus::KnownBad => return Ok(ImportResult::KnownBad),
		}

		let import_headers = if post_digests.is_empty() {
			PrePostHeader::Same(header)
		} else {
			let mut post_header = header.clone();
			for item in post_digests {
				post_header.digest_mut().push(item);
			}
			PrePostHeader::Different(header, post_header)
		};

		let hash = import_headers.post().hash();
		let height = (*import_headers.post().number()).saturated_into::<u64>();

		*self.importing_block.write() = Some(hash);

		let result = self.execute_and_import_block(
			operation,
			origin,
			hash,
			import_headers,
			justification,
			body,
			new_cache,
			finalized,
			auxiliary,
			fork_choice,
			enact_state,
			import_existing,
		);

		if let Ok(ImportResult::Imported(ref aux)) = result {
			if aux.is_new_best {
				telemetry!(SUBSTRATE_INFO; "block.import";
					"height" => height,
					"best" => ?hash,
					"origin" => ?origin
				);
			}
		}

		result
	}

	fn execute_and_import_block(
		&self,
		operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
		origin: BlockOrigin,
		hash: Block::Hash,
		import_headers: PrePostHeader<Block::Header>,
		justification: Option<Justification>,
		body: Option<Vec<Block::Extrinsic>>,
		new_cache: HashMap<CacheKeyId, Vec<u8>>,
		finalized: bool,
		aux: Vec<(Vec<u8>, Option<Vec<u8>>)>,
		fork_choice: ForkChoiceStrategy,
		enact_state: bool,
		import_existing: bool,
	) -> sp_blockchain::Result<ImportResult> where
		E: CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone,
	{
		let parent_hash = import_headers.post().parent_hash().clone();
		let status = self.backend.blockchain().status(BlockId::Hash(hash))?;
		match (import_existing, status) {
			(false, blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain),
			(false, blockchain::BlockStatus::Unknown) => {},
			(true, blockchain::BlockStatus::InChain) =>  {},
			(true, blockchain::BlockStatus::Unknown) => return Err(Error::UnknownBlock(format!("{:?}", hash))),
		}

		let info = self.backend.blockchain().info();

		// the block is lower than our last finalized block so it must revert
		// finality, refusing import.
		if *import_headers.post().number() <= info.finalized_number {
			return Err(sp_blockchain::Error::NotInFinalizedChain);
		}

		// this is a fairly arbitrary choice of where to draw the line on making notifications,
		// but the general goal is to only make notifications when we are already fully synced
		// and get a new chain head.
		let make_notifications = match origin {
			BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => true,
			BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false,
		};

		let storage_changes = match &body {
			Some(body) if enact_state => {
				self.backend.begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?;
				// ensure parent block is finalized to maintain invariant that
				// finality is called sequentially.
				if finalized {
					self.apply_finality_with_block_hash(operation, parent_hash, None, info.best_hash, make_notifications)?;
				}
				// FIXME #1232: correct path logic for when to execute this function
				let (storage_update, changes_update, storage_changes) = self.block_execution(
					&operation.op,
					&import_headers,
					origin,
					hash,
					&body,
				)?;

				operation.op.update_cache(new_cache);
				if let Some(storage_update) = storage_update {
					operation.op.update_db_storage(storage_update)?;
				}
				if let Some(storage_changes) = storage_changes.clone() {
					operation.op.update_storage(storage_changes.0, storage_changes.1)?;
				}
				if let Some(Some(changes_update)) = changes_update {
					operation.op.update_changes_trie(changes_update)?;
				}
				storage_changes
			},
			_ => None,
		};
		let is_new_best = finalized || match fork_choice {
			ForkChoiceStrategy::LongestChain => import_headers.post().number() > &info.best_number,
			ForkChoiceStrategy::Custom(v) => v,
		};
		let leaf_state = if finalized {
			NewBlockState::Final
		} else if is_new_best {
			NewBlockState::Best
		} else {
			NewBlockState::Normal
		};

		let retracted = if is_new_best {
			let route_from_best = sp_blockchain::tree_route(
				self.backend.blockchain(),
				info.best_hash,
				parent_hash,
			)?;
			route_from_best.retracted().iter().rev().map(|e| e.hash.clone()).collect()
		} else {
			Vec::default()
		};

		trace!("Imported {}, (#{}), best={}, origin={:?}", hash, import_headers.post().number(), is_new_best, origin);
		operation.op.set_block_data(
			import_headers.post().clone(),
			body,
			justification,
			leaf_state,
		)?;

		operation.op.insert_aux(aux)?;

		if make_notifications {
			if finalized {
				operation.notify_finalized.push(hash);
			}

			operation.notify_imported = Some(ImportSummary {
				hash,
				origin,
				header: import_headers.into_post(),
				is_new_best,
				storage_changes,
				retracted,
			})
		}

		Ok(ImportResult::imported(is_new_best))
	}

	fn block_execution(
		&self,
		transaction: &B::BlockImportOperation,
		import_headers: &PrePostHeader<Block::Header>,
		origin: BlockOrigin,
		hash: Block::Hash,
		body: &[Block::Extrinsic],
	) -> sp_blockchain::Result<(
		Option<StorageUpdate<B, Block>>,
		Option<Option<ChangesUpdate<Block>>>,
		Option<(
			Vec<(Vec<u8>, Option<Vec<u8>>)>,
			Vec<(Vec<u8>, Vec<(Vec<u8>, Option<Vec<u8>>)>)>
		)>
	)>
		where
			E: CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone,
	{
		match transaction.state()? {
			Some(transaction_state) => {
				let mut overlay = Default::default();
				let get_execution_manager = |execution_strategy: ExecutionStrategy| {
					match execution_strategy {
						ExecutionStrategy::NativeElseWasm => ExecutionManager::NativeElseWasm,
						ExecutionStrategy::AlwaysWasm => ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted),
						ExecutionStrategy::NativeWhenPossible => ExecutionManager::NativeWhenPossible,
						ExecutionStrategy::Both => ExecutionManager::Both(|wasm_result, native_result| {
							let header = import_headers.post();
							warn!("Consensus error between wasm and native block execution at block {}", hash);
							warn!("   Header {:?}", header);
							warn!("   Native result {:?}", native_result);
							warn!("   Wasm result {:?}", wasm_result);
							telemetry!(SUBSTRATE_INFO; "block.execute.consensus_failure";
								"hash" => ?hash,
								"origin" => ?origin,
								"header" => ?header
							);
							wasm_result
						}),
					}
				};

				let encoded_block = <Block as BlockT>::encode_from(
					import_headers.pre(),